diff --git a/Makefile b/Makefile
index ebca5b1b02..f344b5cc3b 100644
--- a/Makefile
+++ b/Makefile
@@ -31,6 +31,7 @@ DOCKER_ENVS := \
 	-e DOCKER_BUILD_ARGS \
 	-e DOCKER_BUILD_GOGC \
 	-e DOCKER_BUILD_PKGS \
+	-e DOCKER_BUILDKIT \
 	-e DOCKER_BASH_COMPLETION_PATH \
 	-e DOCKER_CLI_PATH \
 	-e DOCKER_DEBUG \
diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go
index 22ce9cef7c..546ad5f86d 100644
--- a/api/server/backend/build/backend.go
+++ b/api/server/backend/build/backend.go
@@ -8,10 +8,12 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
+	buildkit "github.com/docker/docker/builder/builder-next"
 	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 )
 
 // ImageComponent provides an interface for working with images
@@ -30,24 +32,39 @@ type Backend struct {
 	builder        Builder
 	fsCache        *fscache.FSCache
 	imageComponent ImageComponent
+	buildkit       *buildkit.Builder
 }
 
 // NewBackend creates a new build backend from components
-func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) {
-	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil
+func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
+	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
 }
 
 // Build builds an image from a Source
 func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
 	options := config.Options
+	useBuildKit := options.Version == types.BuilderBuildKit
+
 	tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
 	if err != nil {
 		return "", err
 	}
 
-	build, err := b.builder.Build(ctx, config)
-	if err != nil {
-		return "", err
+	var build *builder.Result
+	if useBuildKit {
+		build, err = b.buildkit.Build(ctx, config)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		build, err = b.builder.Build(ctx, config)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if build == nil {
+		return "", nil
 	}
 
 	var imageID = build.ImageID
@@ -62,19 +79,48 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
 		}
 	}
 
-	stdout := config.ProgressWriter.StdoutFormatter
-	fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
-	err = tagger.TagImages(image.ID(imageID))
+	if !useBuildKit {
+		stdout := config.ProgressWriter.StdoutFormatter
+		fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
+		err = tagger.TagImages(image.ID(imageID))
+	}
 	return imageID, err
 }
 
 // PruneCache removes all cached build sources
 func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
-	size, err := b.fsCache.Prune(ctx)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to prune build cache")
+	eg, ctx := errgroup.WithContext(ctx)
+
+	var fsCacheSize uint64
+	eg.Go(func() error {
+		var err error
+		fsCacheSize, err = b.fsCache.Prune(ctx)
+		if err != nil {
+			return errors.Wrap(err, "failed to prune fscache")
+		}
+		return nil
+	})
+
+	var buildCacheSize int64
+	eg.Go(func() error {
+		var err error
+		buildCacheSize, err = b.buildkit.Prune(ctx)
+		if err != nil {
+			return errors.Wrap(err, "failed to prune build cache")
+		}
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
 	}
-	return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil
+
+	return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize)}, nil
+}
+
+// Cancel cancels the build by ID
+func (b *Backend) Cancel(ctx context.Context, id string) error {
+	return b.buildkit.Cancel(ctx, id)
 }
 
 func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) {
diff --git a/api/server/router/build/backend.go b/api/server/router/build/backend.go
index d82ef63af1..2ceae9d946 100644
--- a/api/server/router/build/backend.go
+++ b/api/server/router/build/backend.go
@@ -15,6 +15,8 @@ type Backend interface {
 
 	// Prune build cache
 	PruneCache(context.Context) (*types.BuildCachePruneReport, error)
+
+	Cancel(context.Context, string) error
 }
 
 type experimentalProvider interface {
diff --git a/api/server/router/build/build.go b/api/server/router/build/build.go
index dc13a10602..811cd39181 100644
--- a/api/server/router/build/build.go
+++ b/api/server/router/build/build.go
@@ -25,5 +25,6 @@ func (r *buildRouter) initRoutes() {
 	r.routes = []router.Route{
 		router.NewPostRoute("/build", r.postBuild, router.WithCancel),
 		router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel),
+		router.NewPostRoute("/build/cancel", r.postCancel),
 	}
 }
diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go
index 3e3668c42b..2d73e9b1f3 100644
--- a/api/server/router/build/build_routes.go
+++ b/api/server/router/build/build_routes.go
@@ -1,6 +1,7 @@
 package build // import "github.com/docker/docker/api/server/router/build"
 
 import (
+	"bufio"
 	"bytes"
 	"context"
 	"encoding/base64"
@@ -145,10 +146,26 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 		options.CacheFrom = cacheFrom
 	}
 	options.SessionID = r.FormValue("session")
+	options.BuildID = r.FormValue("buildid")
+	builderVersion, err := parseVersion(r.FormValue("version"))
+	if err != nil {
+		return nil, err
+	}
+	options.Version = builderVersion
 
 	return options, nil
 }
 
+func parseVersion(s string) (types.BuilderVersion, error) {
+	if s == "" || s == string(types.BuilderV1) {
+		return types.BuilderV1, nil
+	}
+	if s == string(types.BuilderBuildKit) {
+		return types.BuilderBuildKit, nil
+	}
+	return "", errors.Errorf("invalid version %s", s)
+}
+
 func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	report, err := br.backend.PruneCache(ctx)
 	if err != nil {
@@ -157,6 +174,17 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *
 	return httputils.WriteJSON(w, http.StatusOK, report)
 }
 
+func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	w.Header().Set("Content-Type", "application/json")
+
+	id := r.FormValue("id")
+	if id == "" {
+		return errors.Errorf("build ID not provided")
+	}
+
+	return br.backend.Cancel(ctx, id)
+}
+
 func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	var (
 		notVerboseBuffer = bytes.NewBuffer(nil)
@@ -165,18 +193,34 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 
 	w.Header().Set("Content-Type", "application/json")
 
-	output := ioutils.NewWriteFlusher(w)
+	body := r.Body
+	var ww io.Writer = w
+	if body != nil {
+		// there is a possibility that output is written before request body
+		// has been fully read so we need to protect against it.
+		// this can be removed when
+		// https://github.com/golang/go/issues/15527
+		// https://github.com/golang/go/issues/22209
+		// has been fixed
+		body, ww = wrapOutputBufferedUntilRequestRead(body, ww)
+	}
+
+	output := ioutils.NewWriteFlusher(ww)
 	defer output.Close()
+
 	errf := func(err error) error {
+
 		if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
 			output.Write(notVerboseBuffer.Bytes())
 		}
+
+		logrus.Debugf("isflushed %v", output.Flushed())
 		// Do not write the error in the http output if it's still empty.
 		// This prevents from writing a 200(OK) when there is an internal error.
 		if !output.Flushed() {
 			return err
 		}
-		_, err = w.Write(streamformatter.FormatError(err))
+		_, err = output.Write(streamformatter.FormatError(err))
 		if err != nil {
 			logrus.Warnf("could not write error response: %v", err)
 		}
@@ -205,10 +249,14 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 		return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext)
 	}
 
+	if buildOptions.Version == types.BuilderBuildKit && !br.daemon.HasExperimental() {
+		return errdefs.InvalidParameter(errors.New("buildkit is only supported with experimental mode"))
+	}
+
 	wantAux := versions.GreaterThanOrEqualTo(version, "1.30")
 
 	imgID, err := br.backend.Build(ctx, backend.BuildConfig{
-		Source:         r.Body,
+		Source:         body,
 		Options:        buildOptions,
 		ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader),
 	})
@@ -267,3 +315,102 @@ func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(
 		ProgressReaderFunc: createProgressReader,
 	}
 }
+
+type flusher interface {
+	Flush()
+}
+
+func wrapOutputBufferedUntilRequestRead(rc io.ReadCloser, out io.Writer) (io.ReadCloser, io.Writer) {
+	var fl flusher = &ioutils.NopFlusher{}
+	if f, ok := out.(flusher); ok {
+		fl = f
+	}
+
+	w := &wcf{
+		buf:     bytes.NewBuffer(nil),
+		Writer:  out,
+		flusher: fl,
+	}
+	r := bufio.NewReader(rc)
+	_, err := r.Peek(1)
+	if err != nil {
+		return rc, out
+	}
+	rc = &rcNotifier{
+		Reader: r,
+		Closer: rc,
+		notify: w.notify,
+	}
+	return rc, w
+}
+
+type rcNotifier struct {
+	io.Reader
+	io.Closer
+	notify func()
+}
+
+func (r *rcNotifier) Read(b []byte) (int, error) {
+	n, err := r.Reader.Read(b)
+	if err != nil {
+		r.notify()
+	}
+	return n, err
+}
+
+func (r *rcNotifier) Close() error {
+	r.notify()
+	return r.Closer.Close()
+}
+
+type wcf struct {
+	io.Writer
+	flusher
+	mu      sync.Mutex
+	ready   bool
+	buf     *bytes.Buffer
+	flushed bool
+}
+
+func (w *wcf) Flush() {
+	w.mu.Lock()
+	w.flushed = true
+	if !w.ready {
+		w.mu.Unlock()
+		return
+	}
+	w.mu.Unlock()
+	w.flusher.Flush()
+}
+
+func (w *wcf) Flushed() bool {
+	w.mu.Lock()
+	b := w.flushed
+	w.mu.Unlock()
+	return b
+}
+
+func (w *wcf) Write(b []byte) (int, error) {
+	w.mu.Lock()
+	if !w.ready {
+		n, err := w.buf.Write(b)
+		w.mu.Unlock()
+		return n, err
+	}
+	w.mu.Unlock()
+	return w.Writer.Write(b)
+}
+
+func (w *wcf) notify() {
+	w.mu.Lock()
+	if !w.ready {
+		if w.buf.Len() > 0 {
+			io.Copy(w.Writer, w.buf)
+		}
+		if w.flushed {
+			w.flusher.Flush()
+		}
+		w.ready = true
+	}
+	w.mu.Unlock()
+}
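Reviewer note: `wrapOutputBufferedUntilRequestRead` above holds response writes in memory until the request body has been drained (or fails the initial peek), working around golang/go#15527 and golang/go#22209. A minimal in-package sketch of the expected behavior; the example harness below is illustrative and not part of the patch:

```go
func ExampleWrapOutputBuffered() {
	body := ioutil.NopCloser(strings.NewReader("request body"))
	var sink bytes.Buffer

	rc, out := wrapOutputBufferedUntilRequestRead(body, &sink)

	fmt.Fprint(out, "hello") // held in the wcf buffer for now
	fmt.Println(sink.Len())  // 0: nothing reached the real writer yet

	io.Copy(ioutil.Discard, rc) // draining the body to EOF fires notify()
	rc.Close()

	fmt.Println(sink.String()) // buffered output has been replayed
	// Output:
	// 0
	// hello
}
```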
"github.com/docker/docker/builder/fscache" ) @@ -11,15 +12,17 @@ type systemRouter struct { backend Backend cluster ClusterBackend routes []router.Route - builder *fscache.FSCache + fscache *fscache.FSCache // legacy + builder *buildkit.Builder } // NewRouter initializes a new system router -func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache) router.Router { +func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder) router.Router { r := &systemRouter{ backend: b, cluster: c, - builder: fscache, + fscache: fscache, + builder: builder, } r.routes = []router.Route{ diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go index 573496886c..82b649e98f 100644 --- a/api/server/router/system/system_routes.go +++ b/api/server/router/system/system_routes.go @@ -17,6 +17,7 @@ import ( "github.com/docker/docker/pkg/ioutils" pkgerrors "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { @@ -69,15 +70,45 @@ func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - du, err := s.backend.SystemDiskUsage(ctx) - if err != nil { + eg, ctx := errgroup.WithContext(ctx) + + var du *types.DiskUsage + eg.Go(func() error { + var err error + du, err = s.backend.SystemDiskUsage(ctx) + return err + }) + + var builderSize int64 // legacy + eg.Go(func() error { + var err error + builderSize, err = s.fscache.DiskUsage(ctx) + if err != nil { + return pkgerrors.Wrap(err, "error getting fscache build cache usage") + } + return nil + }) + + var buildCache []*types.BuildCache + eg.Go(func() error { + var err error + buildCache, err = s.builder.DiskUsage(ctx) + if err != nil { + return pkgerrors.Wrap(err, "error getting build cache usage") + } + return nil + }) + + if err := eg.Wait(); err != nil { return err } - builderSize, err := s.builder.DiskUsage(ctx) - if err != nil { - return pkgerrors.Wrap(err, "error getting build cache usage") + + for _, b := range buildCache { + builderSize += b.Size } + du.BuilderSize = builderSize + du.BuildCache = buildCache return httputils.WriteJSON(w, http.StatusOK, du) } diff --git a/api/types/client.go b/api/types/client.go index 3d2e057c9a..3df8d23368 100644 --- a/api/types/client.go +++ b/api/types/client.go @@ -181,8 +181,24 @@ type ImageBuildOptions struct { Target string SessionID string Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string } +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + // ImageBuildResponse holds information // returned by a server after building // an image. 
diff --git a/api/types/types.go b/api/types/types.go
index 729f4eb6c4..06c0ca3a69 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -512,7 +512,8 @@ type DiskUsage struct {
 	Images      []*ImageSummary
 	Containers  []*Container
 	Volumes     []*Volume
-	BuilderSize int64
+	BuildCache  []*BuildCache
+	BuilderSize int64 // deprecated
 }
 
 // ContainersPruneReport contains the response for Engine API:
@@ -585,3 +586,17 @@ type PushResult struct {
 type BuildResult struct {
 	ID string
 }
+
+// BuildCache contains information about a build cache record
+type BuildCache struct {
+	ID      string
+	Mutable bool
+	InUse   bool
+	Size    int64
+
+	CreatedAt   time.Time
+	LastUsedAt  *time.Time
+	UsageCount  int
+	Parent      string
+	Description string
+}
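A small consumer-side sketch (illustrative only) of the new fields: the aggregate `BuilderSize` stays populated for older clients, while per-record accounting comes from `BuildCache`:

```go
// reclaimable sums build-cache records that are not currently in use,
// i.e. roughly what a prune would reclaim; du is a types.DiskUsage
// decoded from GET /system/df.
func reclaimable(du types.DiskUsage) int64 {
	var total int64
	for _, bc := range du.BuildCache {
		if !bc.InUse {
			total += bc.Size
		}
	}
	return total
}
```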
diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
new file mode 100644
index 0000000000..b84d2e8589
--- /dev/null
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -0,0 +1,724 @@
+package containerimage
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	ctdreference "github.com/containerd/containerd/reference"
+	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/remotes/docker/schema1"
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/docker/distribution"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	pkgprogress "github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/reference"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/auth"
+	"github.com/moby/buildkit/source"
+	"github.com/moby/buildkit/util/flightcontrol"
+	"github.com/moby/buildkit/util/imageutil"
+	"github.com/moby/buildkit/util/progress"
+	"github.com/moby/buildkit/util/tracing"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/identity"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/time/rate"
+)
+
+const preferLocal = true // FIXME: make this optional from the op
+
+// SourceOpt is options for creating the image source
+type SourceOpt struct {
+	SessionManager  *session.Manager
+	ContentStore    content.Store
+	CacheAccessor   cache.Accessor
+	ReferenceStore  reference.Store
+	DownloadManager distribution.RootFSDownloadManager
+	MetadataStore   metadata.V2MetadataService
+	ImageStore      image.Store
+}
+
+type imageSource struct {
+	SourceOpt
+	g flightcontrol.Group
+}
+
+// NewSource creates a new image source
+func NewSource(opt SourceOpt) (source.Source, error) {
+	is := &imageSource{
+		SourceOpt: opt,
+	}
+
+	return is, nil
+}
+
+func (is *imageSource) ID() string {
+	return source.DockerImageScheme
+}
+
+func (is *imageSource) getResolver(ctx context.Context) remotes.Resolver {
+	return docker.NewResolver(docker.ResolverOptions{
+		Client:      tracing.DefaultClient,
+		Credentials: is.getCredentialsFromSession(ctx),
+	})
+}
+
+func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
+	id := session.FromContext(ctx)
+	if id == "" {
+		return nil
+	}
+	return func(host string) (string, string, error) {
+		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		caller, err := is.SessionManager.Get(timeoutCtx, id)
+		if err != nil {
+			return "", "", err
+		}
+
+		return auth.CredentialsFunc(tracing.ContextWithSpanFromContext(context.TODO(), ctx), caller)(host)
+	}
+}
+
+func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
+	ref, err := distreference.ParseNormalizedNamed(refStr)
+	if err != nil {
+		return nil, err
+	}
+	dgst, err := is.ReferenceStore.Get(ref)
+	if err != nil {
+		return nil, err
+	}
+	img, err := is.ImageStore.Get(image.ID(dgst))
+	if err != nil {
+		return nil, err
+	}
+	return img.RawJSON(), nil
+}
+
+func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
+	if preferLocal {
+		dt, err := is.resolveLocal(ref)
+		if err == nil {
+			return "", dt, nil
+		}
+	}
+
+	type t struct {
+		dgst digest.Digest
+		dt   []byte
+	}
+	res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
+		dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, "")
+		if err != nil {
+			return nil, err
+		}
+		return &t{dgst: dgst, dt: dt}, nil
+	})
+	if err != nil {
+		return "", nil, err
+	}
+	typed := res.(*t)
+	return typed.dgst, typed.dt, nil
+}
+
+func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
+	imageIdentifier, ok := id.(*source.ImageIdentifier)
+	if !ok {
+		return nil, errors.Errorf("invalid image identifier %v", id)
+	}
+
+	p := &puller{
+		src:      imageIdentifier,
+		is:       is,
+		resolver: is.getResolver(ctx),
+	}
+	return p, nil
+}
+
+type puller struct {
+	is               *imageSource
+	resolveOnce      sync.Once
+	resolveLocalOnce sync.Once
+	src              *source.ImageIdentifier
+	desc             ocispec.Descriptor
+	ref              string
+	resolveErr       error
+	resolver         remotes.Resolver
+	config           []byte
+}
+
+func (p *puller) mainManifestKey(dgst digest.Digest) (digest.Digest, error) {
+	dt, err := json.Marshal(struct {
+		Digest digest.Digest
+		OS     string
+		Arch   string
+	}{
+		Digest: p.desc.Digest,
+		OS:     runtime.GOOS,
+		Arch:   runtime.GOARCH,
+	})
+	if err != nil {
+		return "", err
+	}
+	return digest.FromBytes(dt), nil
+}
+
+func (p *puller) resolveLocal() {
+	p.resolveLocalOnce.Do(func() {
+		dgst := p.src.Reference.Digest()
+		if dgst != "" {
+			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
+			if err == nil {
+				p.ref = p.src.Reference.String()
+				desc := ocispec.Descriptor{
+					Size:   info.Size,
+					Digest: dgst,
+				}
+				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
+				if err == nil {
+					mt, err := imageutil.DetectManifestMediaType(ra)
+					if err == nil {
+						desc.MediaType = mt
+						p.desc = desc
+					}
+				}
+			}
+		}
+
+		if preferLocal {
+			dt, err := p.is.resolveLocal(p.src.Reference.String())
+			if err == nil {
+				p.config = dt
+			}
+		}
+	})
+}
+
+func (p *puller) resolve(ctx context.Context) error {
+	p.resolveOnce.Do(func() {
+		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
+
+		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
+		if err != nil {
+			p.resolveErr = err
+			resolveProgressDone(err)
+			return
+		}
+
+		if p.desc.Digest == "" && p.config == nil {
+			origRef, desc, err := p.resolver.Resolve(ctx, ref.String())
+			if err != nil {
+				p.resolveErr = err
+				resolveProgressDone(err)
+				return
+			}
+
+			p.desc = desc
+			p.ref = origRef
+		}
+
+		// Schema 1 manifests cannot be resolved to an image config
+		// since the conversion must take place after all the content
+		// has been read.
+		// It may be possible to have a mapping between schema 1 manifests
+		// and the schema 2 manifests they are converted to.
+		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
+			ref, err := distreference.WithDigest(ref, p.desc.Digest)
+			if err != nil {
+				p.resolveErr = err
+				resolveProgressDone(err)
+				return
+			}
+
+			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String())
+			if err != nil {
+				p.resolveErr = err
+				resolveProgressDone(err)
+				return
+			}
+
+			p.config = dt
+		}
+		resolveProgressDone(nil)
+	})
+	return p.resolveErr
+}
+
+func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
+	p.resolveLocal()
+
+	if p.desc.Digest != "" && index == 0 {
+		dgst, err := p.mainManifestKey(p.desc.Digest)
+		if err != nil {
+			return "", false, err
+		}
+		return dgst.String(), false, nil
+	}
+
+	if p.config != nil {
+		return cacheKeyFromConfig(p.config).String(), true, nil
+	}
+
+	if err := p.resolve(ctx); err != nil {
+		return "", false, err
+	}
+
+	if p.desc.Digest != "" && index == 0 {
+		dgst, err := p.mainManifestKey(p.desc.Digest)
+		if err != nil {
+			return "", false, err
+		}
+		return dgst.String(), false, nil
+	}
+
+	return cacheKeyFromConfig(p.config).String(), true, nil
+}
+func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
+	p.resolveLocal()
+	if err := p.resolve(ctx); err != nil {
+		return nil, err
+	}
+
+	if p.config != nil {
+		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
+		if err == nil {
+			if len(img.RootFS.DiffIDs) == 0 {
+				return nil, nil
+			}
+			ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(img.RootFS.ChainID()), cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
+			if err != nil {
+				return nil, err
+			}
+			return ref, nil
+		}
+	}
+
+	ongoing := newJobs(p.ref)
+
+	pctx, stopProgress := context.WithCancel(ctx)
+
+	pw, _, ctx := progress.FromContext(ctx)
+	defer pw.Close()
+
+	progressDone := make(chan struct{})
+	go func() {
+		showProgress(pctx, ongoing, p.is.ContentStore, pw)
+		close(progressDone)
+	}()
+	defer func() {
+		<-progressDone
+	}()
+
+	fetcher, err := p.resolver.Fetcher(ctx, p.ref)
+	if err != nil {
+		stopProgress()
+		return nil, err
+	}
+
+	var (
+		schema1Converter *schema1.Converter
+		handlers         []images.Handler
+	)
+	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
+		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
+		handlers = append(handlers, schema1Converter)
+
+		// TODO: Optimize to do dispatch and integrate pulling with download manager,
+		// leverage existing blob mapping and layer storage
+	} else {
+
+		// TODO: need a wrapper snapshot interface that combines content
+		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
+		// or 2) cachemanager should manage the contentstore
+		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+			switch desc.MediaType {
+			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
+				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+			default:
+				return nil, images.ErrSkipDesc
+			}
+			ongoing.add(desc)
+			return nil, nil
+		}))
+
+		// Get all the children for a descriptor
+		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
+		// Set any children labels for that content
+		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
+		// Filter the children by the platform
+		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())
+
+		handlers = append(handlers,
+			remotes.FetchHandler(p.is.ContentStore, fetcher),
+			childrenHandler,
+		)
+	}
+
+	if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil {
+		stopProgress()
+		return nil, err
+	}
+	defer stopProgress()
+
+	if schema1Converter != nil {
+		p.desc, err = schema1Converter.Convert(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
+	if err != nil {
+		return nil, err
+	}
+
+	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platforms.Default())
+	if err != nil {
+		return nil, err
+	}
+
+	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
+	if err != nil {
+		return nil, err
+	}
+
+	var img ocispec.Image
+	if err := json.Unmarshal(dt, &img); err != nil {
+		return nil, err
+	}
+
+	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
+		return nil, errors.Errorf("invalid config for manifest")
+	}
+
+	pchan := make(chan pkgprogress.Progress, 10)
+	defer close(pchan)
+
+	go func() {
+		m := map[string]struct {
+			st      time.Time
+			limiter *rate.Limiter
+		}{}
+		for p := range pchan {
+			if p.Action == "Extracting" {
+				st, ok := m[p.ID]
+				if !ok {
+					st.st = time.Now()
+					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
+					m[p.ID] = st
+				}
+				var end *time.Time
+				if p.LastUpdate || st.limiter.Allow() {
+					if p.LastUpdate {
+						tm := time.Now()
+						end = &tm
+					}
+					pw.Write("extracting "+p.ID, progress.Status{
+						Action:    "extract",
+						Started:   &st.st,
+						Completed: end,
+					})
+				}
+			}
+		}
+	}()
+
+	if len(mfst.Layers) == 0 {
+		return nil, nil
+	}
+
+	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))
+
+	for i, desc := range mfst.Layers {
+		ongoing.add(desc)
+		layers = append(layers, &layerDescriptor{
+			desc:    desc,
+			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
+			fetcher: fetcher,
+			ref:     p.src.Reference,
+			is:      p.is,
+		})
+	}
+
+	defer func() {
+		<-progressDone
+		for _, desc := range mfst.Layers {
+			p.is.ContentStore.Delete(context.TODO(), desc.Digest)
+		}
+	}()
+
+	r := image.NewRootFS()
+	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
+	if err != nil {
+		return nil, err
+	}
+	stopProgress()
+
+	ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
+	release()
+	if err != nil {
+		return nil, err
+	}
+
+	return ref, nil
+}
+
+// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+type layerDescriptor struct {
+	is      *imageSource
+	fetcher remotes.Fetcher
+	desc    ocispec.Descriptor
+	diffID  layer.DiffID
+	ref     ctdreference.Spec
+}
+
+func (ld *layerDescriptor) Key() string {
+	return "v2:" + ld.desc.Digest.String()
+}
+
+func (ld *layerDescriptor) ID() string {
+	return ld.desc.Digest.String()
+}
+
+func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
+	return ld.diffID, nil
+}
+
+func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
+	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rc.Close()
+
+	refKey := remotes.MakeRefKey(ctx, ld.desc)
+
+	ld.is.ContentStore.Abort(ctx, refKey)
+
+	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
+		ld.is.ContentStore.Abort(ctx, refKey)
+		return nil, 0, err
+	}
+
+	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
+}
+
+func (ld *layerDescriptor) Close() {
+	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
+}
+
+func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
+	// Cache mapping from this layer's DiffID to the blobsum
+	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
+}
+
+func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
+	var (
+		ticker   = time.NewTicker(100 * time.Millisecond)
+		statuses = map[string]statusInfo{}
+		done     bool
+	)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+		case <-ctx.Done():
+			done = true
+		}
+
+		resolved := "resolved"
+		if !ongoing.isResolved() {
+			resolved = "resolving"
+		}
+		statuses[ongoing.name] = statusInfo{
+			Ref:    ongoing.name,
+			Status: resolved,
+		}
+
+		actives := make(map[string]statusInfo)
+
+		if !done {
+			active, err := cs.ListStatuses(ctx)
+			if err != nil {
+				// log.G(ctx).WithError(err).Error("active check failed")
+				continue
+			}
+			// update status of active entries!
+			for _, active := range active {
+				actives[active.Ref] = statusInfo{
+					Ref:       active.Ref,
+					Status:    "downloading",
+					Offset:    active.Offset,
+					Total:     active.Total,
+					StartedAt: active.StartedAt,
+					UpdatedAt: active.UpdatedAt,
+				}
+			}
+		}
+
+		// now, update the items in jobs that are not in active
+		for _, j := range ongoing.jobs() {
+			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
+			if a, ok := actives[refKey]; ok {
+				started := j.started
+				pw.Write(j.Digest.String(), progress.Status{
+					Action:  a.Status,
+					Total:   int(a.Total),
+					Current: int(a.Offset),
+					Started: &started,
+				})
+				continue
+			}
+
+			if !j.done {
+				info, err := cs.Info(context.TODO(), j.Digest)
+				if err != nil {
+					if errdefs.IsNotFound(err) {
+						// pw.Write(j.Digest.String(), progress.Status{
+						// 	Action: "waiting",
+						// })
+						continue
+					}
+				} else {
+					j.done = true
+				}
+
+				if done || j.done {
+					started := j.started
+					createdAt := info.CreatedAt
+					pw.Write(j.Digest.String(), progress.Status{
+						Action:    "done",
+						Current:   int(info.Size),
+						Total:     int(info.Size),
+						Completed: &createdAt,
+						Started:   &started,
+					})
+				}
+			}
+		}
+		if done {
+			return
+		}
+	}
+}
+// jobs provides a way of identifying the download keys for a particular task
+// encountered during the pull walk.
+//
+// This is very minimal and will probably be replaced with something more
+// featured.
+type jobs struct {
+	name     string
+	added    map[digest.Digest]job
+	mu       sync.Mutex
+	resolved bool
+}
+
+type job struct {
+	ocispec.Descriptor
+	done    bool
+	started time.Time
+}
+
+func newJobs(name string) *jobs {
+	return &jobs{
+		name:  name,
+		added: make(map[digest.Digest]job),
+	}
+}
+
+func (j *jobs) add(desc ocispec.Descriptor) {
+	j.mu.Lock()
+	defer j.mu.Unlock()
+
+	if _, ok := j.added[desc.Digest]; ok {
+		return
+	}
+	j.added[desc.Digest] = job{
+		Descriptor: desc,
+		started:    time.Now(),
+	}
+}
+
+func (j *jobs) jobs() []job {
+	j.mu.Lock()
+	defer j.mu.Unlock()
+
+	descs := make([]job, 0, len(j.added))
+	for _, j := range j.added {
+		descs = append(descs, j)
+	}
+	return descs
+}
+
+func (j *jobs) isResolved() bool {
+	j.mu.Lock()
+	defer j.mu.Unlock()
+	return j.resolved
+}
+
+type statusInfo struct {
+	Ref       string
+	Status    string
+	Offset    int64
+	Total     int64
+	StartedAt time.Time
+	UpdatedAt time.Time
+}
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := progress.FromContext(ctx)
+	now := time.Now()
+	st := progress.Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		// TODO: set error on status
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
+}
+
+// cacheKeyFromConfig returns a stable digest from image config. If image config
+// is a known oci image we will use chainID of layers.
+func cacheKeyFromConfig(dt []byte) digest.Digest {
+	var img ocispec.Image
+	err := json.Unmarshal(dt, &img)
+	if err != nil {
+		return digest.FromBytes(dt)
+	}
+	if img.RootFS.Type != "layers" {
+		return digest.FromBytes(dt)
+	}
+	return identity.ChainID(img.RootFS.DiffIDs)
+}
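Reviewer note on `cacheKeyFromConfig`: for a layered OCI image the key is the chain ID over the config's DiffIDs, so two configs with identical layer stacks converge on one cache key even if unrelated config fields differ; anything unparseable falls back to a digest of the raw bytes. A small illustration of the recursion `identity.ChainID` applies (digests here are illustrative):

```go
// ChainID([L1])      = L1
// ChainID([L1...Ln]) = Digest(ChainID([L1...Ln-1]) + " " + Ln)
diffIDs := []digest.Digest{
	digest.FromString("layer-1"),
	digest.FromString("layer-2"),
}
fmt.Println(identity.ChainID(diffIDs))
```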
diff --git a/builder/builder-next/adapters/snapshot/layer.go b/builder/builder-next/adapters/snapshot/layer.go
new file mode 100644
index 0000000000..d0aa6f28fa
--- /dev/null
+++ b/builder/builder-next/adapters/snapshot/layer.go
@@ -0,0 +1,113 @@
+package snapshot
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+
+	"github.com/boltdb/bolt"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
+	if l, err := s.getLayer(key, true); err != nil {
+		return nil, err
+	} else if l != nil {
+		return getDiffChain(l), nil
+	}
+
+	id, committed := s.getGraphDriverID(key)
+	if !committed {
+		return nil, errors.Errorf("can not convert active %s to layer", key)
+	}
+
+	info, err := s.Stat(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+
+	eg, gctx := errgroup.WithContext(ctx)
+
+	// TODO: add flightcontrol
+
+	var parentChainID layer.ChainID
+	if info.Parent != "" {
+		eg.Go(func() error {
+			diffIDs, err := s.EnsureLayer(gctx, info.Parent)
+			if err != nil {
+				return err
+			}
+			parentChainID = layer.CreateChainID(diffIDs)
+			return nil
+		})
+	}
+
+	tmpDir, err := ioutils.TempDir("", "docker-tarsplit")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmpDir)
+	tarSplitPath := filepath.Join(tmpDir, "tar-split")
+
+	var diffID layer.DiffID
+	var size int64
+	eg.Go(func() error {
+		parent := ""
+		if p := info.Parent; p != "" {
+			if l, err := s.getLayer(p, true); err != nil {
+				return err
+			} else if l != nil {
+				parent, err = getGraphID(l)
+				if err != nil {
+					return err
+				}
+			} else {
+				parent, _ = s.getGraphDriverID(info.Parent)
+			}
+		}
+		diffID, size, err = s.reg.ChecksumForGraphID(id, parent, "", tarSplitPath)
+		return err
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	l, err := s.reg.RegisterByGraphID(id, parentChainID, diffID, tarSplitPath, size)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := s.db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte(key))
+		b.Put(keyChainID, []byte(l.ChainID()))
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	s.mu.Lock()
+	s.refs[key] = l
+	s.mu.Unlock()
+
+	return getDiffChain(l), nil
+}
+
+func getDiffChain(l layer.Layer) []layer.DiffID {
+	if p := l.Parent(); p != nil {
+		return append(getDiffChain(p), l.DiffID())
+	}
+	return []layer.DiffID{l.DiffID()}
+}
+
+func getGraphID(l layer.Layer) (string, error) {
+	if l, ok := l.(interface {
+		CacheID() string
+	}); ok {
+		return l.CacheID(), nil
+	}
+	return "", errors.Errorf("couldn't access cacheID for %s", l.ChainID())
+}
diff --git a/builder/builder-next/adapters/snapshot/snapshot.go b/builder/builder-next/adapters/snapshot/snapshot.go
new file mode 100644
index 0000000000..9934c8ae3a
--- /dev/null
+++ b/builder/builder-next/adapters/snapshot/snapshot.go
@@ -0,0 +1,445 @@
+package snapshot
+
+import (
+	"context"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/layer"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/snapshot"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+var keyParent = []byte("parent")
+var keyCommitted = []byte("committed")
+var keyChainID = []byte("chainid")
+var keySize = []byte("size")
+
+// Opt defines options for creating the snapshotter
+type Opt struct {
+	GraphDriver graphdriver.Driver
+	LayerStore  layer.Store
+	Root        string
+}
+
+type graphIDRegistrar interface {
+	RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error)
+	Release(layer.Layer) ([]layer.Metadata, error)
+	checksumCalculator
+}
+
+type checksumCalculator interface {
+	ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error)
+}
+
+type snapshotter struct {
+	opt Opt
+
+	refs map[string]layer.Layer
+	db   *bolt.DB
+	mu   sync.Mutex
+	reg  graphIDRegistrar
+}
+
+var _ snapshot.SnapshotterBase = &snapshotter{}
+
+// NewSnapshotter creates a new snapshotter
+func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
+	dbPath := filepath.Join(opt.Root, "snapshots.db")
+	db, err := bolt.Open(dbPath, 0600, nil)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
+	}
+
+	reg, ok := opt.LayerStore.(graphIDRegistrar)
+	if !ok {
+		return nil, errors.Errorf("layerstore doesn't support graphID registration")
+	}
+
+	s := &snapshotter{
+		opt:  opt,
+		db:   db,
+		refs: map[string]layer.Layer{},
+		reg:  reg,
+	}
+	return s, nil
+}
+
+func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
+	origParent := parent
+	if parent != "" {
+		if l, err := s.getLayer(parent, false); err != nil {
+			return err
+		} else if l != nil {
+			parent, err = getGraphID(l)
+			if err != nil {
+				return err
+			}
+		} else {
+			parent, _ = s.getGraphDriverID(parent)
+		}
+	}
+	if err := s.opt.GraphDriver.Create(key, parent, nil); err != nil {
+		return err
+	}
+	if err := s.db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte(key))
+		if err != nil {
+			return err
+		}
+
+		if err := b.Put(keyParent, []byte(origParent)); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *snapshotter) chainID(key string) (layer.ChainID, bool) {
+	if strings.HasPrefix(key, "sha256:") {
+		dgst, err := digest.Parse(key)
+		if err != nil {
+			return "", false
+		}
+		return layer.ChainID(dgst), true
+	}
+	return "", false
+}
+
+func (s *snapshotter) getLayer(key string, withCommitted bool) (layer.Layer, error) {
+	s.mu.Lock()
+	l, ok := s.refs[key]
+	if !ok {
+		id, ok := s.chainID(key)
+		if !ok {
+			if !withCommitted {
+				s.mu.Unlock()
+				return nil, nil
+			}
+			if err := s.db.View(func(tx *bolt.Tx) error {
+				b := tx.Bucket([]byte(key))
+				if b == nil {
+					return nil
+				}
+				v := b.Get(keyChainID)
+				if v != nil {
+					id = layer.ChainID(v)
+				}
+				return nil
+			}); err != nil {
+				s.mu.Unlock()
+				return nil, err
+			}
+			if id == "" {
+				s.mu.Unlock()
+				return nil, nil
+			}
+		}
+		var err error
+		l, err = s.opt.LayerStore.Get(id)
+		if err != nil {
+			s.mu.Unlock()
+			return nil, err
+		}
+		s.refs[key] = l
+		if err := s.db.Update(func(tx *bolt.Tx) error {
+			_, err := tx.CreateBucketIfNotExists([]byte(key))
+			return err
+		}); err != nil {
+			s.mu.Unlock()
+			return nil, err
+		}
+	}
+	s.mu.Unlock()
+
+	return l, nil
+}
+
+func (s *snapshotter) getGraphDriverID(key string) (string, bool) {
+	var gdID string
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte(key))
+		if b == nil {
+			return errors.Errorf("not found") // TODO: typed
+		}
+		v := b.Get(keyCommitted)
+		if v != nil {
+			gdID = string(v)
+		}
+		return nil
+	}); err != nil || gdID == "" {
+		return key, false
+	}
+	return gdID, true
+}
+
+func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
+	inf := snapshots.Info{
+		Kind: snapshots.KindActive,
+	}
+
+	l, err := s.getLayer(key, false)
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+	if l != nil {
+		if p := l.Parent(); p != nil {
+			inf.Parent = p.ChainID().String()
+		}
+		inf.Kind = snapshots.KindCommitted
+		inf.Name = key
+		return inf, nil
+	}
+
+	l, err = s.getLayer(key, true)
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+
+	id, committed := s.getGraphDriverID(key)
+	if committed {
+		inf.Kind = snapshots.KindCommitted
+	}
+
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte(id))
+		if b == nil && l == nil {
+			return errors.Errorf("snapshot %s not found", id) // TODO: typed
+		}
+		inf.Name = key
+		if b != nil {
+			v := b.Get(keyParent)
+			if v != nil {
+				inf.Parent = string(v)
+				return nil
+			}
+		}
+		if l != nil {
+			if p := l.Parent(); p != nil {
+				inf.Parent = p.ChainID().String()
+			}
+			inf.Kind = snapshots.KindCommitted
+		}
+		return nil
+	}); err != nil {
+		return snapshots.Info{}, err
+	}
+	return inf, nil
+}
+
+func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountable, error) {
+	l, err := s.getLayer(key, true)
+	if err != nil {
+		return nil, err
+	}
+	if l != nil {
+		id := identity.NewID()
+		rwlayer, err := s.opt.LayerStore.CreateRWLayer(id, l.ChainID(), nil)
+		if err != nil {
+			return nil, err
+		}
+		rootfs, err := rwlayer.Mount("")
+		if err != nil {
+			return nil, err
+		}
+		mnt := []mount.Mount{{
+			Source:  rootfs.Path(),
+			Type:    "bind",
+			Options: []string{"rbind"},
+		}}
+		return &constMountable{
+			mounts: mnt,
+			release: func() error {
+				_, err := s.opt.LayerStore.ReleaseRWLayer(rwlayer)
+				return err
+			},
+		}, nil
+	}
+
+	id, _ := s.getGraphDriverID(key)
+
+	rootfs, err := s.opt.GraphDriver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+	mnt := []mount.Mount{{
+		Source:  rootfs.Path(),
+		Type:    "bind",
+		Options: []string{"rbind"},
+	}}
+	return &constMountable{
+		mounts: mnt,
+		release: func() error {
+			return s.opt.GraphDriver.Put(id)
+		},
+	}, nil
+}
+
+func (s *snapshotter) Remove(ctx context.Context, key string) error {
+	l, err := s.getLayer(key, true)
+	if err != nil {
+		return err
+	}
+
+	id, _ := s.getGraphDriverID(key)
+
+	var found bool
+	if err := s.db.Update(func(tx *bolt.Tx) error {
+		found = tx.Bucket([]byte(key)) != nil
+		if found {
+			tx.DeleteBucket([]byte(key))
+			if id != key {
+				tx.DeleteBucket([]byte(id))
+			}
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	if l != nil {
+		s.mu.Lock()
+		delete(s.refs, key)
+		s.mu.Unlock()
+		_, err := s.opt.LayerStore.Release(l)
+		return err
+	}
+
+	if !found { // this happens when removing views
+		return nil
+	}
+
+	return s.opt.GraphDriver.Remove(id)
+}
+
+func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte(name))
+		if err != nil {
+			return err
+		}
+		if err := b.Put(keyCommitted, []byte(key)); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (snapshot.Mountable, error) {
+	return s.Mounts(ctx, parent)
+}
+
+func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
+	return errors.Errorf("not-implemented")
+}
+
+func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+	// not implemented
+	return s.Stat(ctx, info.Name)
+}
+
+func (s *snapshotter) Usage(ctx context.Context, key string) (us snapshots.Usage, retErr error) {
+	usage := snapshots.Usage{}
+	if l, err := s.getLayer(key, true); err != nil {
+		return usage, err
+	} else if l != nil {
+		s, err := l.DiffSize()
+		if err != nil {
+			return usage, err
+		}
+		usage.Size = s
+		return usage, nil
+	}
+
+	size := int64(-1)
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte(key))
+		if b == nil {
+			return nil
+		}
+		v := b.Get(keySize)
+		if v != nil {
+			s, err := strconv.Atoi(string(v))
+			if err != nil {
+				return err
+			}
+			size = int64(s)
+		}
+		return nil
+	}); err != nil {
+		return usage, err
+	}
+
+	if size != -1 {
+		usage.Size = size
+		return usage, nil
+	}
+
+	id, _ := s.getGraphDriverID(key)
+
+	info, err := s.Stat(ctx, key)
+	if err != nil {
+		return usage, err
+	}
+	var parent string
+	if info.Parent != "" {
+		if l, err := s.getLayer(info.Parent, false); err != nil {
+			return usage, err
+		} else if l != nil {
+			parent, err = getGraphID(l)
+			if err != nil {
+				return usage, err
+			}
+		} else {
+			parent, _ = s.getGraphDriverID(info.Parent)
+		}
+	}
+
+	diffSize, err := s.opt.GraphDriver.DiffSize(id, parent)
+	if err != nil {
+		return usage, err
+	}
+
+	if err := s.db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte(key))
+		if err != nil {
+			return err
+		}
+		return b.Put(keySize, []byte(strconv.Itoa(int(diffSize))))
+	}); err != nil {
+		return usage, err
+	}
+	usage.Size = diffSize
+	return usage, nil
+}
+
+func (s *snapshotter) Close() error {
+	return s.db.Close()
+}
+
+type constMountable struct {
+	mounts  []mount.Mount
+	release func() error
+}
+
+func (m *constMountable) Mount() ([]mount.Mount, error) {
+	return m.mounts, nil
+}
+
+func (m *constMountable) Release() error {
+	if m.release == nil {
+		return nil
+	}
+	return m.release()
+}
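For orientation: the snapshotter keeps one bolt bucket per snapshot key, holding at most the four keys declared at the top of the file. A hypothetical in-package debugging helper (not part of the patch) makes that layout explicit:

```go
// dumpSnapshot prints the bookkeeping recorded for a snapshot key:
//   parent    - snapshot key passed to Prepare
//   committed - graphdriver ID recorded by Commit
//   chainid   - layer chain ID set by EnsureLayer
//   size      - cached DiffSize result from Usage
func dumpSnapshot(db *bolt.DB, key string) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(key))
		if b == nil {
			return errors.Errorf("no bucket for key %s", key)
		}
		for _, k := range [][]byte{keyParent, keyCommitted, keyChainID, keySize} {
			fmt.Printf("%s=%q\n", k, b.Get(k))
		}
		return nil
	})
}
```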
diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go
new file mode 100644
index 0000000000..8c48d9abbf
--- /dev/null
+++ b/builder/builder-next/builder.go
@@ -0,0 +1,419 @@
+package buildkit
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/daemon/images"
+	"github.com/docker/docker/pkg/jsonmessage"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/control"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/util/tracing"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+	grpcmetadata "google.golang.org/grpc/metadata"
+)
+
+// Opt is option struct required for creating the builder
+type Opt struct {
+	SessionManager *session.Manager
+	Root           string
+	Dist           images.DistributionServices
+}
+
+// Builder can build using BuildKit backend
+type Builder struct {
+	controller     *control.Controller
+	reqBodyHandler *reqBodyHandler
+
+	mu   sync.Mutex
+	jobs map[string]*buildJob
+}
+
+// New creates a new builder
+func New(opt Opt) (*Builder, error) {
+	reqHandler := newReqBodyHandler(tracing.DefaultTransport)
+
+	c, err := newController(reqHandler, opt)
+	if err != nil {
+		return nil, err
+	}
+	b := &Builder{
+		controller:     c,
+		reqBodyHandler: reqHandler,
+		jobs:           map[string]*buildJob{},
+	}
+	return b, nil
+}
+
+// Cancel cancels a build using ID
+func (b *Builder) Cancel(ctx context.Context, id string) error {
+	b.mu.Lock()
+	if j, ok := b.jobs[id]; ok && j.cancel != nil {
+		j.cancel()
+	}
+	b.mu.Unlock()
+	return nil
+}
+
+// DiskUsage returns a report about space used by build cache
+func (b *Builder) DiskUsage(ctx context.Context) ([]*types.BuildCache, error) {
+	duResp, err := b.controller.DiskUsage(ctx, &controlapi.DiskUsageRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	var items []*types.BuildCache
+	for _, r := range duResp.Record {
+		items = append(items, &types.BuildCache{
+			ID:      r.ID,
+			Mutable: r.Mutable,
+			InUse:   r.InUse,
+			Size:    r.Size_,
+
+			CreatedAt:   r.CreatedAt,
+			LastUsedAt:  r.LastUsedAt,
+			UsageCount:  int(r.UsageCount),
+			Parent:      r.Parent,
+			Description: r.Description,
+		})
+	}
+	return items, nil
+}
+
+// Prune clears all reclaimable build cache
+func (b *Builder) Prune(ctx context.Context) (int64, error) {
+	ch := make(chan *controlapi.UsageRecord)
+
+	eg, ctx := errgroup.WithContext(ctx)
+
+	eg.Go(func() error {
+		defer close(ch)
+		return b.controller.Prune(&controlapi.PruneRequest{}, &pruneProxy{
+			streamProxy: streamProxy{ctx: ctx},
+			ch:          ch,
+		})
+	})
+
+	var size int64
+	eg.Go(func() error {
+		for r := range ch {
+			size += r.Size_
+		}
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// Build executes a build request
+func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
+	var rc = opt.Source
+
+	if buildID := opt.Options.BuildID; buildID != "" {
+		b.mu.Lock()
+
+		upload := false
+		if strings.HasPrefix(buildID, "upload-request:") {
+			upload = true
+			buildID = strings.TrimPrefix(buildID, "upload-request:")
+		}
+
+		if _, ok := b.jobs[buildID]; !ok {
+			b.jobs[buildID] = newBuildJob()
+		}
+		j := b.jobs[buildID]
+		var cancel func()
+		ctx, cancel = context.WithCancel(ctx)
+		j.cancel = cancel
+		b.mu.Unlock()
+
+		if upload {
+			ctx2, cancel := context.WithTimeout(ctx, 5*time.Second)
+			defer cancel()
+			err := j.SetUpload(ctx2, rc)
+			return nil, err
+		}
+
+		if remoteContext := opt.Options.RemoteContext; remoteContext == "upload-request" {
+			ctx2, cancel := context.WithTimeout(ctx, 5*time.Second)
+			defer cancel()
+			var err error
+			rc, err = j.WaitUpload(ctx2)
+			if err != nil {
+				return nil, err
+			}
+			opt.Options.RemoteContext = ""
+		}
+
+		defer func() {
+			delete(b.jobs, buildID)
+		}()
+	}
+
+	var out builder.Result
+
+	id := identity.NewID()
+
+	frontendAttrs := map[string]string{}
+
+	if opt.Options.Target != "" {
+		frontendAttrs["target"] = opt.Options.Target
+	}
+
+	if opt.Options.Dockerfile != "" && opt.Options.Dockerfile != "." {
+		frontendAttrs["filename"] = opt.Options.Dockerfile
+	}
+
+	if opt.Options.RemoteContext != "" {
+		if opt.Options.RemoteContext != "client-session" {
+			frontendAttrs["context"] = opt.Options.RemoteContext
+		}
+	} else {
+		url, cancel := b.reqBodyHandler.newRequest(rc)
+		defer cancel()
+		frontendAttrs["context"] = url
+	}
+
+	cacheFrom := append([]string{}, opt.Options.CacheFrom...)
+
+	frontendAttrs["cache-from"] = strings.Join(cacheFrom, ",")
+
+	for k, v := range opt.Options.BuildArgs {
+		if v == nil {
+			continue
+		}
+		frontendAttrs["build-arg:"+k] = *v
+	}
+
+	for k, v := range opt.Options.Labels {
+		frontendAttrs["label:"+k] = v
+	}
+
+	if opt.Options.NoCache {
+		frontendAttrs["no-cache"] = ""
+	}
+
+	exporterAttrs := map[string]string{}
+
+	if len(opt.Options.Tags) > 0 {
+		exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
+	}
+
+	req := &controlapi.SolveRequest{
+		Ref:           id,
+		Exporter:      "moby",
+		ExporterAttrs: exporterAttrs,
+		Frontend:      "dockerfile.v0",
+		FrontendAttrs: frontendAttrs,
+		Session:       opt.Options.SessionID,
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+
+	eg.Go(func() error {
+		resp, err := b.controller.Solve(ctx, req)
+		if err != nil {
+			return err
+		}
+		id, ok := resp.ExporterResponse["containerimage.digest"]
+		if !ok {
+			return errors.Errorf("missing image id")
+		}
+		out.ImageID = id
+		return nil
+	})
+
+	ch := make(chan *controlapi.StatusResponse)
+
+	eg.Go(func() error {
+		defer close(ch)
+		return b.controller.Status(&controlapi.StatusRequest{
+			Ref: id,
+		}, &statusProxy{streamProxy: streamProxy{ctx: ctx}, ch: ch})
+	})
+
+	eg.Go(func() error {
+		for sr := range ch {
+			dt, err := sr.Marshal()
+			if err != nil {
+				return err
+			}
+
+			auxJSONBytes, err := json.Marshal(dt)
+			if err != nil {
+				return err
+			}
+			auxJSON := new(json.RawMessage)
+			*auxJSON = auxJSONBytes
+			msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: "moby.buildkit.trace", Aux: auxJSON})
+			if err != nil {
+				return err
+			}
+			msgJSON = append(msgJSON, []byte("\r\n")...)
+			n, err := opt.ProgressWriter.Output.Write(msgJSON)
+			if err != nil {
+				return err
+			}
+			if n != len(msgJSON) {
+				return io.ErrShortWrite
+			}
+		}
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
+
+type streamProxy struct {
+	ctx context.Context
+}
+
+func (sp *streamProxy) SetHeader(_ grpcmetadata.MD) error {
+	return nil
+}
+
+func (sp *streamProxy) SendHeader(_ grpcmetadata.MD) error {
+	return nil
+}
+
+func (sp *streamProxy) SetTrailer(_ grpcmetadata.MD) {
+}
+
+func (sp *streamProxy) Context() context.Context {
+	return sp.ctx
+}
+func (sp *streamProxy) RecvMsg(m interface{}) error {
+	return io.EOF
+}
+
+type statusProxy struct {
+	streamProxy
+	ch chan *controlapi.StatusResponse
+}
+
+func (sp *statusProxy) Send(resp *controlapi.StatusResponse) error {
+	return sp.SendMsg(resp)
+}
+func (sp *statusProxy) SendMsg(m interface{}) error {
+	if sr, ok := m.(*controlapi.StatusResponse); ok {
+		sp.ch <- sr
+	}
+	return nil
+}
+
+type pruneProxy struct {
+	streamProxy
+	ch chan *controlapi.UsageRecord
+}
+
+func (sp *pruneProxy) Send(resp *controlapi.UsageRecord) error {
+	return sp.SendMsg(resp)
+}
+func (sp *pruneProxy) SendMsg(m interface{}) error {
+	if sr, ok := m.(*controlapi.UsageRecord); ok {
+		sp.ch <- sr
+	}
+	return nil
+}
+
+type contentStoreNoLabels struct {
+	content.Store
+}
+
+func (c *contentStoreNoLabels) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return content.Info{}, nil
+}
+
+type wrapRC struct {
+	io.ReadCloser
+	once   sync.Once
+	err    error
+	waitCh chan struct{}
+}
+
+func (w *wrapRC) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	if err != nil {
+		e := err
+		if e == io.EOF {
+			e = nil
+		}
+		w.close(e)
+	}
+	return n, err
+}
+
+func (w *wrapRC) Close() error {
+	err := w.ReadCloser.Close()
+	w.close(err)
+	return err
+}
+
+func (w *wrapRC) close(err error) {
+	w.once.Do(func() {
+		w.err = err
+		close(w.waitCh)
+	})
+}
+
+func (w *wrapRC) wait() error {
+	<-w.waitCh
+	return w.err
+}
+
+type buildJob struct {
+	cancel func()
+	waitCh chan func(io.ReadCloser) error
+}
+
+func newBuildJob() *buildJob {
+	return &buildJob{waitCh: make(chan func(io.ReadCloser) error)}
+}
+
+func (j *buildJob) WaitUpload(ctx context.Context) (io.ReadCloser, error) {
+	done := make(chan struct{})
+
+	var upload io.ReadCloser
+	fn := func(rc io.ReadCloser) error {
+		w := &wrapRC{ReadCloser: rc, waitCh: make(chan struct{})}
+		upload = w
+		close(done)
+		return w.wait()
+	}
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case j.waitCh <- fn:
+		<-done
+		return upload, nil
+	}
+}
+
+func (j *buildJob) SetUpload(ctx context.Context, rc io.ReadCloser) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case fn := <-j.waitCh:
+		return fn(rc)
+	}
+}
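Reviewer note on the `BuildID` handling in `Build`: it implements a two-request handshake. A request whose buildid carries the `upload-request:` prefix parks its body in `SetUpload`; the real build, sent with `remote=upload-request`, adopts that body via `WaitUpload`. A hedged client-level sketch of the sequence (the endpoint parameters come from this patch; everything else here is assumed):

```go
func buildWithUploadedContext(c *http.Client, host string, buildContext io.Reader) error {
	errCh := make(chan error, 1)
	go func() {
		// Phase 1: stream the context; the daemon holds this request open
		// until the paired build request consumes the body.
		resp, err := c.Post(host+"/build?buildid=upload-request:deadbeef",
			"application/x-tar", buildContext)
		if err == nil {
			resp.Body.Close()
		}
		errCh <- err
	}()

	// Phase 2: the actual build adopts the uploaded context.
	resp, err := c.Post(host+"/build?buildid=deadbeef&remote=upload-request&version=2", "", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil { // progress JSON stream
		return err
	}
	// POST /build/cancel?id=deadbeef would cancel either phase gracefully.
	return <-errCh
}
```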
"github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/control" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/dockerfile" + "github.com/moby/buildkit/frontend/gateway" + "github.com/moby/buildkit/snapshot/blobmapping" + "github.com/moby/buildkit/solver/boltdbcachestorage" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" +) + +func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + + dist := opt.Dist + root := opt.Root + + var driver graphdriver.Driver + if ls, ok := dist.LayerStore.(interface { + Driver() graphdriver.Driver + }); ok { + driver = ls.Driver() + } else { + return nil, errors.Errorf("could not access graphdriver") + } + + sbase, err := snapshot.NewSnapshotter(snapshot.Opt{ + GraphDriver: driver, + LayerStore: dist.LayerStore, + Root: root, + }) + if err != nil { + return nil, err + } + + store, err := local.NewStore(filepath.Join(root, "content")) + if err != nil { + return nil, err + } + store = &contentStoreNoLabels{store} + + md, err := metadata.NewStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{ + Content: store, + Snapshotter: sbase, + MetadataStore: md, + }) + + cm, err := cache.NewManager(cache.ManagerOpt{ + Snapshotter: snapshotter, + MetadataStore: md, + }) + if err != nil { + return nil, err + } + + src, err := containerimage.NewSource(containerimage.SourceOpt{ + SessionManager: opt.SessionManager, + CacheAccessor: cm, + ContentStore: store, + DownloadManager: dist.DownloadManager, + MetadataStore: dist.V2MetadataService, + ImageStore: dist.ImageStore, + ReferenceStore: dist.ReferenceStore, + }) + if err != nil { + return nil, err + } + + exec, err := newExecutor(root) + if err != nil { + return nil, err + } + + differ, ok := sbase.(containerimageexp.Differ) + if !ok { + return nil, errors.Errorf("snapshotter doesn't support differ") + } + + exp, err := containerimageexp.New(containerimageexp.Opt{ + ImageStore: dist.ImageStore, + ReferenceStore: dist.ReferenceStore, + Differ: differ, + }) + if err != nil { + return nil, err + } + + cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db")) + if err != nil { + return nil, err + } + + frontends := map[string]frontend.Frontend{} + frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend() + frontends["gateway.v0"] = gateway.NewGatewayFrontend() + + wopt := mobyworker.Opt{ + ID: "moby", + SessionManager: opt.SessionManager, + MetadataStore: md, + ContentStore: store, + CacheManager: cm, + Snapshotter: snapshotter, + Executor: exec, + ImageSource: src, + DownloadManager: dist.DownloadManager, + V2MetadataService: dist.V2MetadataService, + Exporters: map[string]exporter.Exporter{ + "moby": exp, + }, + Transport: rt, + } + + wc := &worker.Controller{} + w, err := mobyworker.NewWorker(wopt) + if err != nil { + return nil, err + } + wc.Add(w) + + ci := remotecache.NewCacheImporter(remotecache.ImportOpt{ + Worker: w, + SessionManager: opt.SessionManager, + }) + + return control.NewController(control.Opt{ + SessionManager: opt.SessionManager, + WorkerController: wc, + Frontends: frontends, + CacheKeyStorage: cacheStorage, + // CacheExporter: ce, + CacheImporter: ci, + }) +} diff --git a/builder/builder-next/executor_unix.go 
b/builder/builder-next/executor_unix.go new file mode 100644 index 0000000000..da54473dd1 --- /dev/null +++ b/builder/builder-next/executor_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package buildkit + +import ( + "path/filepath" + + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/executor/runcexecutor" +) + +func newExecutor(root string) (executor.Executor, error) { + return runcexecutor.New(runcexecutor.Opt{ + Root: filepath.Join(root, "executor"), + CommandCandidates: []string{"docker-runc", "runc"}, + }) +} diff --git a/builder/builder-next/executor_windows.go b/builder/builder-next/executor_windows.go new file mode 100644 index 0000000000..7b0c6e64c8 --- /dev/null +++ b/builder/builder-next/executor_windows.go @@ -0,0 +1,21 @@ +package buildkit + +import ( + "context" + "errors" + "io" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/executor" +) + +func newExecutor(_ string) (executor.Executor, error) { + return &winExecutor{}, nil +} + +type winExecutor struct { +} + +func (e *winExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { + return errors.New("buildkit executor not implemented for windows") +} diff --git a/builder/builder-next/exporter/export.go b/builder/builder-next/exporter/export.go new file mode 100644 index 0000000000..818ff00d4e --- /dev/null +++ b/builder/builder-next/exporter/export.go @@ -0,0 +1,146 @@ +package containerimage + +import ( + "context" + "fmt" + "strings" + + distref "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const ( + keyImageName = "name" + exporterImageConfig = "containerimage.config" +) + +// Differ can make a moby layer from a snapshot +type Differ interface { + EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) +} + +// Opt defines a struct for creating new exporter +type Opt struct { + ImageStore image.Store + ReferenceStore reference.Store + Differ Differ +} + +type imageExporter struct { + opt Opt +} + +// New creates a new moby imagestore exporter +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + i := &imageExporterInstance{imageExporter: e} + for k, v := range opt { + switch k { + case keyImageName: + for _, v := range strings.Split(v, ",") { + ref, err := distref.ParseNormalizedNamed(v) + if err != nil { + return nil, err + } + i.targetNames = append(i.targetNames, ref) + } + case exporterImageConfig: + i.config = []byte(v) + default: + logrus.Warnf("image exporter: unknown option %s", k) + } + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + targetNames []distref.Named + config []byte +} + +func (e *imageExporterInstance) Name() string { + return "exporting to image" +} + +func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) (map[string]string, error) { + if config, ok := opt[exporterImageConfig]; ok { + e.config = config + } + config := e.config + + var diffs []digest.Digest + if ref != nil { + layersDone := oneOffProgress(ctx, "exporting layers") + + if err := 
ref.Finalize(ctx); err != nil { + return nil, err + } + + diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID()) + if err != nil { + return nil, err + } + + diffs = make([]digest.Digest, len(diffIDs)) + for i := range diffIDs { + diffs[i] = digest.Digest(diffIDs[i]) + } + + layersDone(nil) + } + + if len(config) == 0 { + var err error + config, err = emptyImageConfig() + if err != nil { + return nil, err + } + } + + history, err := parseHistoryFromConfig(config) + if err != nil { + return nil, err + } + + diffs, history = normalizeLayersAndHistory(diffs, history, ref) + + config, err = patchImageConfig(config, diffs, history) + if err != nil { + return nil, err + } + + configDigest := digest.FromBytes(config) + + configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest)) + id, err := e.opt.ImageStore.Create(config) + if err != nil { + return nil, configDone(err) + } + configDone(nil) + + if e.opt.ReferenceStore != nil { + for _, targetName := range e.targetNames { + tagDone := oneOffProgress(ctx, "naming to "+targetName.String()) + + if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil { + return nil, tagDone(err) + } + tagDone(nil) + } + } + + return map[string]string{ + "containerimage.digest": id.String(), + }, nil +} diff --git a/builder/builder-next/exporter/writer.go b/builder/builder-next/exporter/writer.go new file mode 100644 index 0000000000..e8fa143fc6 --- /dev/null +++ b/builder/builder-next/exporter/writer.go @@ -0,0 +1,177 @@ +package containerimage + +import ( + "context" + "encoding/json" + "runtime" + "time" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/system" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// const ( +// emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") +// ) + +func emptyImageConfig() ([]byte, error) { + img := ocispec.Image{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + img.RootFS.Type = "layers" + img.Config.WorkingDir = "/" + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create empty image config") +} + +func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) { + var config struct { + History []ocispec.History + } + if err := json.Unmarshal(dt, &config); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal history from config") + } + return config.History, nil +} + +func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History) ([]byte, error) { + m := map[string]json.RawMessage{} + if err := json.Unmarshal(dt, &m); err != nil { + return nil, errors.Wrap(err, "failed to parse image config for patch") + } + + var rootFS ocispec.RootFS + rootFS.Type = "layers" + rootFS.DiffIDs = append(rootFS.DiffIDs, dps...) 
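+	// rootFS now carries the final layer diff IDs; below, it and the history
+	// are re-marshaled and spliced into the raw config map, leaving all other
+	// image-config fields untouched as json.RawMessage.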
+ + dt, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal rootfs") + } + m["rootfs"] = dt + + dt, err = json.Marshal(history) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal history") + } + m["history"] = dt + + if _, ok := m["created"]; !ok { + var tm *time.Time + for _, h := range history { + if h.Created != nil { + tm = h.Created + } + } + dt, err = json.Marshal(&tm) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + + dt, err = json.Marshal(m) + return dt, errors.Wrap(err, "failed to marshal config after patch") +} + +func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History, ref cache.ImmutableRef) ([]digest.Digest, []ocispec.History) { + refMeta := getRefMetadata(ref, len(diffs)) + var historyLayers int + for _, h := range history { + if !h.EmptyLayer { + historyLayers++ + } + } + if historyLayers > len(diffs) { + // this case shouldn't happen but if it does force set history layers empty + // from the bottom + logrus.Warn("invalid image config with unaccounted layers") + historyCopy := make([]ocispec.History, 0, len(history)) + var l int + for _, h := range history { + if l >= len(diffs) { + h.EmptyLayer = true + } + if !h.EmptyLayer { + l++ + } + historyCopy = append(historyCopy, h) + } + history = historyCopy + } + + if len(diffs) > historyLayers { + // some history items are missing. add them based on the ref metadata + for _, md := range refMeta[historyLayers:] { + history = append(history, ocispec.History{ + Created: &md.createdAt, + CreatedBy: md.description, + Comment: "buildkit.exporter.image.v0", + }) + } + } + + var layerIndex int + for i, h := range history { + if !h.EmptyLayer { + if h.Created == nil { + h.Created = &refMeta[layerIndex].createdAt + } + layerIndex++ + } + history[i] = h + } + + return diffs, history +} + +type refMetadata struct { + description string + createdAt time.Time +} + +func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata { + if limit <= 0 { + return nil + } + meta := refMetadata{ + description: "created by buildkit", // shouldn't be shown but don't fail build + createdAt: time.Now(), + } + if ref == nil { + return append(getRefMetadata(nil, limit-1), meta) + } + if descr := cache.GetDescription(ref.Metadata()); descr != "" { + meta.description = descr + } + meta.createdAt = cache.GetCreatedAt(ref.Metadata()) + p := ref.Parent() + if p != nil { + defer p.Release(context.TODO()) + } + return append(getRefMetadata(p, limit-1), meta) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/builder/builder-next/reqbodyhandler.go b/builder/builder-next/reqbodyhandler.go new file mode 100644 index 0000000000..48433908fb --- /dev/null +++ b/builder/builder-next/reqbodyhandler.go @@ -0,0 +1,67 @@ +package buildkit + +import ( + "io" + "net/http" + "strings" + "sync" + + "github.com/moby/buildkit/identity" + "github.com/pkg/errors" +) + +const urlPrefix = "build-context-" + +type reqBodyHandler struct { + mu sync.Mutex + rt http.RoundTripper + + requests map[string]io.ReadCloser +} + +func newReqBodyHandler(rt http.RoundTripper) *reqBodyHandler { + return &reqBodyHandler{ + 
rt: rt, + requests: map[string]io.ReadCloser{}, + } +} + +func (h *reqBodyHandler) newRequest(rc io.ReadCloser) (string, func()) { + id := identity.NewID() + h.mu.Lock() + h.requests[id] = rc + h.mu.Unlock() + return "http://" + urlPrefix + id, func() { + h.mu.Lock() + delete(h.requests, id) + h.mu.Unlock() + } +} + +func (h *reqBodyHandler) RoundTrip(req *http.Request) (*http.Response, error) { + host := req.URL.Host + if strings.HasPrefix(host, urlPrefix) { + if req.Method != "GET" { + return nil, errors.Errorf("invalid request") + } + id := strings.TrimPrefix(host, urlPrefix) + h.mu.Lock() + rc, ok := h.requests[id] + delete(h.requests, id) + h.mu.Unlock() + + if !ok { + return nil, errors.Errorf("context not found") + } + + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: rc, + ContentLength: -1, + } + + return resp, nil + } + return h.rt.RoundTrip(req) +} diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go new file mode 100644 index 0000000000..0089b39e6c --- /dev/null +++ b/builder/builder-next/worker/worker.go @@ -0,0 +1,321 @@ +package worker + +import ( + "context" + "fmt" + "io" + "io/ioutil" + nethttp "net/http" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/rootfs" + "github.com/docker/docker/distribution" + distmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + pkgprogress "github.com/docker/docker/pkg/progress" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/source/git" + "github.com/moby/buildkit/source/http" + "github.com/moby/buildkit/source/local" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Opt defines a structure for creating a worker. +type Opt struct { + ID string + Labels map[string]string + SessionManager *session.Manager + MetadataStore *metadata.Store + Executor executor.Executor + Snapshotter snapshot.Snapshotter + ContentStore content.Store + CacheManager cache.Manager + ImageSource source.Source + Exporters map[string]exporter.Exporter + DownloadManager distribution.RootFSDownloadManager + V2MetadataService distmetadata.V2MetadataService + Transport nethttp.RoundTripper +} + +// Worker is a local worker instance with dedicated snapshotter, cache, and so on. +// TODO: s/Worker/OpWorker/g ? 
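+// A Worker resolves LLB source, exec, and build vertices; it runs them on the
+// daemon's graphdriver-backed snapshotter and exports results through the
+// daemon's image and reference stores.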
+type Worker struct { + Opt + SourceManager *source.Manager +} + +// NewWorker instantiates a local worker +func NewWorker(opt Opt) (*Worker, error) { + sm, err := source.NewManager() + if err != nil { + return nil, err + } + + cm := opt.CacheManager + sm.Register(opt.ImageSource) + + gs, err := git.NewSource(git.Opt{ + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + }) + if err != nil { + return nil, err + } + + sm.Register(gs) + + hs, err := http.NewSource(http.Opt{ + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + Transport: opt.Transport, + }) + if err != nil { + return nil, err + } + + sm.Register(hs) + + ss, err := local.NewSource(local.Opt{ + SessionManager: opt.SessionManager, + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + }) + if err != nil { + return nil, err + } + sm.Register(ss) + + return &Worker{ + Opt: opt, + SourceManager: sm, + }, nil +} + +// ID returns worker ID +func (w *Worker) ID() string { + return w.Opt.ID +} + +// Labels returns map of all worker labels +func (w *Worker) Labels() map[string]string { + return w.Opt.Labels +} + +// LoadRef loads a reference by ID +func (w *Worker) LoadRef(id string) (cache.ImmutableRef, error) { + return w.CacheManager.Get(context.TODO(), id) +} + +// ResolveOp converts a LLB vertex into a LLB operation +func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) { + switch op := v.Sys().(type) { + case *pb.Op_Source: + return ops.NewSourceOp(v, op, w.SourceManager, w) + case *pb.Op_Exec: + return ops.NewExecOp(v, op, w.CacheManager, w.MetadataStore, w.Executor, w) + case *pb.Op_Build: + return ops.NewBuildOp(v, op, s, w) + default: + return nil, errors.Errorf("could not resolve %v", v) + } +} + +// ResolveImageConfig returns image config for an image +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) { + // ImageSource is typically source/containerimage + resolveImageConfig, ok := w.ImageSource.(resolveImageConfig) + if !ok { + return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID()) + } + return resolveImageConfig.ResolveImageConfig(ctx, ref) +} + +// Exec executes a process directly on a worker +func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { + active, err := w.CacheManager.New(ctx, rootFS) + if err != nil { + return err + } + defer active.Release(context.TODO()) + return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr) +} + +// DiskUsage returns disk usage report +func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + return w.CacheManager.DiskUsage(ctx, opt) +} + +// Prune deletes reclaimable build cache +func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo) error { + return w.CacheManager.Prune(ctx, ch) +} + +// Exporter returns exporter by name +func (w *Worker) Exporter(name string) (exporter.Exporter, error) { + exp, ok := w.Exporters[name] + if !ok { + return nil, errors.Errorf("exporter %q could not be found", name) + } + return exp, nil +} + +// GetRemote returns a remote snapshot reference for a local one +func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) { + return nil, errors.Errorf("getremote not implemented") +} + +// FromRemote converts a remote snapshot reference to a local one +func (w *Worker) FromRemote(ctx context.Context, 
remote *solver.Remote) (cache.ImmutableRef, error) { + rootfs, err := getLayers(ctx, remote.Descriptors) + if err != nil { + return nil, err + } + + layers := make([]xfer.DownloadDescriptor, 0, len(rootfs)) + + for _, l := range rootfs { + // ongoing.add(desc) + layers = append(layers, &layerDescriptor{ + desc: l.Blob, + diffID: layer.DiffID(l.Diff.Digest), + provider: remote.Provider, + w: w, + pctx: ctx, + }) + } + + defer func() { + for _, l := range rootfs { + w.ContentStore.Delete(context.TODO(), l.Blob.Digest) + } + }() + + r := image.NewRootFS() + rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{}) + if err != nil { + return nil, err + } + defer release() + + ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest))) + if err != nil { + return nil, err + } + return ref, nil +} + +type discardProgress struct{} + +func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error { + return nil +} + +// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) +type layerDescriptor struct { + provider content.Provider + desc ocispec.Descriptor + diffID layer.DiffID + // ref ctdreference.Spec + w *Worker + pctx context.Context +} + +func (ld *layerDescriptor) Key() string { + return "v2:" + ld.desc.Digest.String() +} + +func (ld *layerDescriptor) ID() string { + return ld.desc.Digest.String() +} + +func (ld *layerDescriptor) DiffID() (layer.DiffID, error) { + return ld.diffID, nil +} + +func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) { + done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest)) + if err := contentutil.Copy(ctx, ld.w.ContentStore, ld.provider, ld.desc); err != nil { + return nil, 0, done(err) + } + done(nil) + + ra, err := ld.w.ContentStore.ReaderAt(ctx, ld.desc) + if err != nil { + return nil, 0, err + } + + return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil +} + +func (ld *layerDescriptor) Close() { + // ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest) +} + +func (ld *layerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest}) +} + +func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) { + layers := make([]rootfs.Layer, len(descs)) + for i, desc := range descs { + diffIDStr := desc.Annotations["containerd.io/uncompressed"] + if diffIDStr == "" { + return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest) + } + diffID, err := digest.Parse(diffIDStr) + if err != nil { + return nil, err + } + layers[i].Diff = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Digest: diffID, + } + layers[i].Blob = ocispec.Descriptor{ + MediaType: desc.MediaType, + Digest: desc.Digest, + Size: desc.Size, + } + } + return layers, nil +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +type resolveImageConfig interface { + ResolveImageConfig(ctx context.Context, ref string) 
(digest.Digest, []byte, error)
+}
diff --git a/client/build_cancel.go b/client/build_cancel.go
new file mode 100644
index 0000000000..4cf8c980a9
--- /dev/null
+++ b/client/build_cancel.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// BuildCancel requests the daemon to cancel an ongoing build request
+func (cli *Client) BuildCancel(ctx context.Context, id string) error {
+	query := url.Values{}
+	query.Set("id", id)
+
+	serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	return nil
+}
diff --git a/client/image_build.go b/client/image_build.go
index 6721460316..dff19b989f 100644
--- a/client/image_build.go
+++ b/client/image_build.go
@@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
 	if options.Platform != "" {
 		query.Set("platform", strings.ToLower(options.Platform))
 	}
+	if options.BuildID != "" {
+		query.Set("buildid", options.BuildID)
+	}
+	query.Set("version", string(options.Version))
 	return query, nil
 }
diff --git a/client/interface.go b/client/interface.go
index 0487a0b9f3..9250c468a6 100644
--- a/client/interface.go
+++ b/client/interface.go
@@ -86,6 +86,7 @@ type DistributionAPIClient interface {
 type ImageAPIClient interface {
 	ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
 	BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
+	BuildCancel(ctx context.Context, id string) error
 	ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
 	ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
 	ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go
index 6b0be5f7f7..efefaa1ac3 100644
--- a/cmd/dockerd/daemon.go
+++ b/cmd/dockerd/daemon.go
@@ -27,6 +27,7 @@ import (
 	swarmrouter "github.com/docker/docker/api/server/router/swarm"
 	systemrouter "github.com/docker/docker/api/server/router/system"
 	"github.com/docker/docker/api/server/router/volume"
+	buildkit "github.com/docker/docker/builder/builder-next"
 	"github.com/docker/docker/builder/dockerfile"
 	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/cli/debug"
@@ -238,7 +239,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 type routerOptions struct {
 	sessionManager *session.Manager
 	buildBackend   *buildbackend.Backend
-	buildCache     *fscache.FSCache
+	buildCache     *fscache.FSCache // legacy
+	buildkit       *buildkit.Builder
 	daemon         *daemon.Daemon
 	api            *apiserver.Server
 	cluster        *cluster.Cluster
@@ -270,7 +272,16 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
 		return opts, err
 	}
 
-	bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache)
+	buildkit, err := buildkit.New(buildkit.Opt{
+		SessionManager: sm,
+		Root:           filepath.Join(config.Root, "buildkit"),
+		Dist:           daemon.DistributionServices(),
+	})
+	if err != nil {
+		return opts, err
+	}
+
+	bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache, buildkit)
 	if err != nil {
 		return opts, errors.Wrap(err, "failed to create buildmanager")
 	}
@@ -279,6 +290,7 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
 		sessionManager: sm,
		buildBackend:   bb,
 		buildCache:     buildCache,
+		buildkit:       buildkit,
 		daemon:         daemon,
 	}, nil
 }
@@ -452,7 +464,7 @@ func initRouter(opts routerOptions) {
 		checkpointrouter.NewRouter(opts.daemon, decoder),
 		container.NewRouter(opts.daemon, decoder),
 		image.NewRouter(opts.daemon.ImageService()),
-		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache),
+		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit),
 		volume.NewRouter(opts.daemon.VolumesService()),
 		build.NewRouter(opts.buildBackend, opts.daemon),
 		sessionrouter.NewRouter(opts.sessionManager),
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 43b7731a3e..5e5f586ae0 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -922,6 +922,11 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	return d, nil
 }
 
+// DistributionServices returns services controlling daemon storage
+func (daemon *Daemon) DistributionServices() images.DistributionServices {
+	return daemon.imageService.DistributionServices()
+}
+
 func (daemon *Daemon) waitForStartupDone() {
 	<-daemon.startupDone
 }
diff --git a/daemon/images/service.go b/daemon/images/service.go
index 4af48959bf..263217dccd 100644
--- a/daemon/images/service.go
+++ b/daemon/images/service.go
@@ -3,9 +3,11 @@ package images // import "github.com/docker/docker/daemon/images"
 import (
 	"context"
 	"os"
+	"runtime"
 
 	"github.com/docker/docker/container"
 	daemonevents "github.com/docker/docker/daemon/events"
+	"github.com/docker/docker/distribution"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
@@ -74,6 +76,26 @@ type ImageService struct {
 	uploadManager             *xfer.LayerUploadManager
 }
 
+// DistributionServices provides daemon image storage services
+type DistributionServices struct {
+	DownloadManager   distribution.RootFSDownloadManager
+	V2MetadataService metadata.V2MetadataService
+	LayerStore        layer.Store // TODO: lcow
+	ImageStore        image.Store
+	ReferenceStore    dockerreference.Store
+}
+
+// DistributionServices returns services controlling daemon image storage
+func (i *ImageService) DistributionServices() DistributionServices {
+	return DistributionServices{
+		DownloadManager:   i.downloadManager,
+		V2MetadataService: metadata.NewV2MetadataService(i.distributionMetadataStore),
+		LayerStore:        i.layerStores[runtime.GOOS],
+		ImageStore:        i.imageStore,
+		ReferenceStore:    i.referenceStore,
+	}
+}
+
 // CountImages returns the number of images stored by ImageService
 // called from info.go
 func (i *ImageService) CountImages() int {
diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers
index bb34d45887..da2bb7cad2 100644
--- a/hack/make/.integration-test-helpers
+++ b/hack/make/.integration-test-helpers
@@ -84,6 +84,7 @@ test_env() {
 		env -i \
 			DEST="$ABS_DEST" \
 			DOCKER_API_VERSION="$DOCKER_API_VERSION" \
+			DOCKER_BUILDKIT="$DOCKER_BUILDKIT" \
 			DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
 			DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
 			DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 0aae7c4575..a0ac9c767f 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -2534,7 +2534,7 @@ func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
 		build.WithFile(".dockerignore", "!\n"),
 	)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err:      "error checking context: 'illegal exclusion pattern: \"!\"",
+		Err:      `illegal exclusion pattern: "!"`,
 	})
 }
diff --git a/integration-cli/docker_cli_build_unix_test.go b/integration-cli/docker_cli_build_unix_test.go
index d6c4370064..4b313f95c1 100644
--- a/integration-cli/docker_cli_build_unix_test.go
+++ b/integration-cli/docker_cli_build_unix_test.go
@@ -126,8 +126,12 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) {
 // *  Run a 1-year-long sleep from a docker build.
 // *  When docker events sees container start, close the "docker build" command
 // *  Wait for docker events to emit a dying event.
+//
+// TODO(buildkit): this test needs to be rewritten for buildkit.
+// It has been tested manually and passes. Confirmed issue: docker build output parsing.
+// Potential issue: newEventObserver uses docker events, which is not hooked up to buildkit.
 func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) {
-	testRequires(c, DaemonIsLinux)
+	testRequires(c, DaemonIsLinux, TODOBuildkit)
 	name := "testbuildcancellation"
 
 	observer, err := newEventObserver(c)
diff --git a/integration-cli/docker_cli_health_test.go b/integration-cli/docker_cli_health_test.go
index 632830c60b..a06b6c8830 100644
--- a/integration-cli/docker_cli_health_test.go
+++ b/integration-cli/docker_cli_health_test.go
@@ -90,7 +90,7 @@ func (s *DockerSuite) TestHealth(c *check.C) {
 	buildImageSuccessfully(c, "no_healthcheck", build.WithDockerfile(`FROM testhealth
 		HEALTHCHECK NONE`))
 
-	out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck")
+	out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "no_healthcheck")
 	c.Check(out, checker.Equals, "[NONE]\n")
 
 	// Enable the checks from the CLI
diff --git a/integration-cli/requirements_test.go b/integration-cli/requirements_test.go
index 9b08a63b0e..e6038edb18 100644
--- a/integration-cli/requirements_test.go
+++ b/integration-cli/requirements_test.go
@@ -208,6 +208,10 @@ func SwarmInactive() bool {
 	return testEnv.DaemonInfo.Swarm.LocalNodeState == swarm.LocalNodeStateInactive
 }
 
+func TODOBuildkit() bool {
+	return os.Getenv("DOCKER_BUILDKIT") == ""
+}
+
 // testRequires checks if the environment satisfies the requirements
 // for the test to run or skips the tests.
func testRequires(c requirement.SkipT, requirements ...requirement.Test) { diff --git a/layer/filestore.go b/layer/filestore.go index b1cbb80166..208a0c3a85 100644 --- a/layer/filestore.go +++ b/layer/filestore.go @@ -3,7 +3,6 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -16,6 +15,7 @@ import ( "github.com/docker/distribution" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -194,8 +194,8 @@ func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { } content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") + if content == "" { + return "", errors.Errorf("invalid cache id value") } return content, nil diff --git a/layer/layer_store.go b/layer/layer_store.go index bf0705afc5..c1fbf85091 100644 --- a/layer/layer_store.go +++ b/layer/layer_store.go @@ -121,6 +121,10 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) return ls, nil } +func (ls *layerStore) Driver() graphdriver.Driver { + return ls.driver +} + func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { cl, ok := ls.layerMap[layer] if ok { diff --git a/layer/ro_layer.go b/layer/ro_layer.go index bc0fe1dddf..3555e8b027 100644 --- a/layer/ro_layer.go +++ b/layer/ro_layer.go @@ -54,6 +54,10 @@ func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) } +func (rl *roLayer) CacheID() string { + return rl.cacheID +} + func (rl *roLayer) ChainID() ChainID { return rl.chainID } diff --git a/vendor.conf b/vendor.conf index a313759028..4ffb3edf56 100644 --- a/vendor.conf +++ b/vendor.conf @@ -27,10 +27,13 @@ github.com/imdario/mergo v0.3.5 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 # buildkit -github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f -github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f +github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474 +github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc +github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b #get libnetwork packages @@ -72,8 +75,8 @@ github.com/pborman/uuid v1.0 google.golang.org/grpc v1.12.0 -# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly -github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340 +# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal +github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/image-spec v1.0.1 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 @@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068 golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad 
-github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
 github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
 github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
 github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
diff --git a/vendor/github.com/containerd/containerd/contrib/README.md b/vendor/github.com/containerd/containerd/contrib/README.md
new file mode 100644
index 0000000000..f19f03a609
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/contrib/README.md
@@ -0,0 +1,11 @@
+# contrib
+
+The `contrib` directory contains packages that do not belong in the core containerd packages but still contribute to overall containerd usability.
+
+Packages such as AppArmor or SELinux are placed in `contrib` because they are platform dependent and often require higher level tools and profiles to work.
+
+Packaging and other build tools can be added to `contrib` to aid in packaging containerd for various distributions.
+
+## Testing
+
+Code in the `contrib` directory may or may not have been tested in the normal test pipeline for core components.
diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go
new file mode 100644
index 0000000000..2a1806cf87
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go
@@ -0,0 +1,56 @@
+// +build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package seccomp
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/oci"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// WithProfile receives the name of a file stored on disk containing a
+// JSON-formatted seccomp profile, as specified by the opencontainers/runtime-spec.
+// The profile is read from the file, unmarshaled, and set on the spec.
+func WithProfile(profile string) oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		s.Linux.Seccomp = &specs.LinuxSeccomp{}
+		f, err := ioutil.ReadFile(profile)
+		if err != nil {
+			return fmt.Errorf("Cannot load seccomp profile %q: %v", profile, err)
+		}
+		if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil {
+			return fmt.Errorf("Decoding seccomp profile failed %q: %v", profile, err)
+		}
+		return nil
+	}
+}
+
+// WithDefaultProfile sets the default seccomp profile to the spec.
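+// The resulting profile depends on the spec's bounding capability set
+// (see DefaultProfile below).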
+// Note: must follow the setting of process capabilities +func WithDefaultProfile() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Linux.Seccomp = DefaultProfile(s) + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go new file mode 100644 index 0000000000..11b446a6ed --- /dev/null +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go @@ -0,0 +1,581 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package seccomp + +import ( + "runtime" + "syscall" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func arches() []specs.Arch { + switch runtime.GOARCH { + case "amd64": + return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32} + case "arm64": + return []specs.Arch{specs.ArchARM, specs.ArchAARCH64} + case "mips64": + return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} + case "mips64n32": + return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} + case "mipsel64": + return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} + case "mipsel64n32": + return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} + case "s390x": + return []specs.Arch{specs.ArchS390, specs.ArchS390X} + default: + return []specs.Arch{} + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
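+// Syscalls outside the whitelist fail with the ActErrno default action; the
+// base list is then widened per GOARCH and per bounding capability below.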
+func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { + syscalls := []specs.LinuxSyscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + 
"setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0x0, + Op: specs.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0x0008, + Op: specs.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0xffffffff, + Op: specs.OpEqualTo, + }, + }, + }, + } + + s := &specs.LinuxSeccomp{ + DefaultAction: specs.ActErrno, + Architectures: arches(), + Syscalls: syscalls, + } + + // include by arch + switch runtime.GOARCH { + case "arm", "arm64": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "amd64": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "arch_prctl", + "modify_ldt", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "386": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "modify_ldt", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "s390", "s390x": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + } + + admin := false + for _, c := range sp.Process.Capabilities.Bounding { + switch c { + case "CAP_DAC_READ_SEARCH": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"open_by_handle_at"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_ADMIN": + admin = true + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_BOOT": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"reboot"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_CHROOT": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: 
[]string{"chroot"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_MODULE": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_PACCT": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"acct"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_PTRACE": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_RAWIO": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "iopl", + "ioperm", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_TIME": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "settimeofday", + "stime", + "adjtimex", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_TTY_CONFIG": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"vhangup"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + } + } + + if !admin { + switch runtime.GOARCH { + case "s390", "s390x": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "clone", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 1, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: specs.OpMaskedEqual, + }, + }, + }) + default: + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "clone", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: specs.OpMaskedEqual, + }, + }, + }) + } + } + + return s +} diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/google/shlex/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README new file mode 100644 index 0000000000..c86bcc066f --- /dev/null +++ b/vendor/github.com/google/shlex/README @@ -0,0 +1,2 @@ +go-shlex is a simple lexer for go that supports shell-style quoting, +commenting, and escaping. 
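Aside, not part of the diff: a minimal usage sketch for the newly vendored shlex package, which is pulled in under the buildkit section of the vendor.conf hunk above. The only assumption here is the vendored import path; quoted groups and escaped spaces survive as single tokens.

    package main

    import (
    	"fmt"

    	"github.com/google/shlex"
    )

    func main() {
    	// Double quotes group words and allow backslash escapes;
    	// a bare backslash escapes the following space.
    	args, err := shlex.Split(`one "two three" four\ five`)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%q\n", args) // ["one" "two three" "four five"]
    }

Note the design choice visible in shlex.go below: comments are a token class of their own, so the Lexer can skip them while the Tokenizer still surfaces them.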
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 0000000000..3cb37b7e48
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,417 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+	shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+	l := NewLexer(os.Stdin)
+	for token, err := l.Next(); err == nil; token, err = l.Next() {
+		// process token
+	}
+
+To access the raw token stream (which includes tokens for comments):
+
+	t := NewTokenizer(os.Stdin)
+	for token, err := t.Next(); err == nil; token, err = t.Next() {
+		// process token
+	}
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: a word, space, comment, or unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: a quote, space, or escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
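+// A map keyed by rune keeps classification O(1); runes that are not in the
+// map yield the zero value, unknownRuneClass, and are treated as ordinary
+// word characters by the state machine.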
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens.
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It returns an error if it reaches a state it does not know how to handle.
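+// For example, scanning `one "two three" four` yields three word tokens,
+// "one", "two three" and "four": the opening double quote moves the machine
+// from startState to quotingEscapingState, the closing quote drops it back
+// to inWordState, and each unquoted space ends the current word.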
+func (t *Tokenizer) scanStream() (*Token, error) { + state := startState + var tokenType TokenType + var value []rune + var nextRune rune + var nextRuneType runeTokenClass + var err error + + for { + nextRune, _, err = t.input.ReadRune() + nextRuneType = t.classifier.ClassifyRune(nextRune) + + if err == io.EOF { + nextRuneType = eofRuneClass + err = nil + } else if err != nil { + return nil, err + } + + switch state { + case startState: // no runes read yet + { + switch nextRuneType { + case eofRuneClass: + { + return nil, io.EOF + } + case spaceRuneClass: + { + } + case escapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingState + } + case escapeRuneClass: + { + tokenType = WordToken + state = escapingState + } + case commentRuneClass: + { + tokenType = CommentToken + state = commentState + } + default: + { + tokenType = WordToken + value = append(value, nextRune) + state = inWordState + } + } + } + case inWordState: // in a regular word + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + t.input.UnreadRune() + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + state = quotingState + } + case escapeRuneClass: + { + state = escapingState + } + default: + { + value = append(value, nextRune) + } + } + } + case escapingState: // the rune after an escape character + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = inWordState + value = append(value, nextRune) + } + } + } + case escapingQuotedState: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = quotingEscapingState + value = append(value, nextRune) + } + } + } + case quotingEscapingState: // in escaping double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = inWordState + } + case escapeRuneClass: + { + state = escapingQuotedState + } + default: + { + value = append(value, nextRune) + } + } + } + case quotingState: // in non-escaping single quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case nonEscapingQuoteRuneClass: + { + state = inWordState + } + default: + { + value = append(value, nextRune) + } + } + } + case commentState: // in a comment + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + if nextRune == '\n' { + state = startState + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } else { + value = append(value, nextRune) + } + } + 
default: + { + value = append(value, nextRune) + } + } + } + default: + { + return nil, fmt.Errorf("Unexpected state: %v", state) + } + } + } +} + +// Next returns the next token in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go index b25558388f..c7172c406b 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -2,6 +2,7 @@ package iradix import ( "bytes" + "strings" "github.com/hashicorp/golang-lru/simplelru" ) @@ -11,7 +12,9 @@ const ( // cache used per transaction. This is used to cache the updates // to the nodes near the root, while the leaves do not need to be // cached. This is important for very large transactions to prevent - // the modified cache from growing to be enormous. + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. defaultModifiedCache = 8192 ) @@ -27,7 +30,11 @@ type Tree struct { // New returns an empty Tree func New() *Tree { - t := &Tree{root: &Node{}} + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } return t } @@ -40,75 +47,208 @@ func (t *Tree) Len() int { // atomically and returns a new tree when committed. A transaction // is not thread safe, and should only be used by a single goroutine. type Txn struct { - root *Node - size int - modified *simplelru.LRU + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool } // Txn starts a new transaction that can be used to mutate the tree func (t *Tree) Txn() *Txn { txn := &Txn{ root: t.root, + snap: t.root, size: t.size, } return txn } -// writeNode returns a node to be modified, if the current -// node as already been modified during the course of -// the transaction, it is used in-place. 
-func (t *Txn) writeNode(n *Node) *Node { - // Ensure the modified set exists - if t.modified == nil { +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { lru, err := simplelru.NewLRU(defaultModifiedCache, nil) if err != nil { panic(err) } - t.modified = lru + t.writable = lru } - // If this node has already been modified, we can - // continue to use it during this transaction. - if _, ok := t.modified.Get(n); ok { + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } return n } - // Copy the existing node - nc := new(Node) + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. 
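+	// The fresh copy below gets its own mutateCh; watchers parked on the
+	// old node's channel (tracked via trackChannel above) are woken when
+	// the transaction commits and Notify closes every tracked channel.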
+ nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } if n.prefix != nil { nc.prefix = make([]byte, len(n.prefix)) copy(nc.prefix, n.prefix) } - if n.leaf != nil { - nc.leaf = new(leafNode) - *nc.leaf = *n.leaf - } if len(n.edges) != 0 { nc.edges = make([]edge, len(n.edges)) copy(nc.edges, n.edges) } - // Mark this node as modified - t.modified.Add(n, nil) + // Mark this node as writable. + t.writable.Add(nc, nil) return nc } +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. + n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + // insert does a recursive insertion func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { - // Handle key exhaution + // Handle key exhaustion if len(search) == 0 { - nc := t.writeNode(n) + var oldVal interface{} + didUpdate := false if n.isLeaf() { - old := nc.leaf.val - nc.leaf.val = v - return nc, old, true - } else { - nc.leaf = &leafNode{ - key: k, - val: v, - } - return nc, nil, false + oldVal = n.leaf.val + didUpdate = true } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate } // Look for the edge @@ -119,14 +259,16 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface e := edge{ label: search[0], node: &Node{ + mutateCh: make(chan struct{}), leaf: &leafNode{ - key: k, - val: v, + mutateCh: make(chan struct{}), + key: k, + val: v, }, prefix: search, }, } - nc := t.writeNode(n) + nc := t.writeNode(n, false) nc.addEdge(e) return nc, nil, false } @@ -137,7 +279,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface search = search[commonPrefix:] newChild, oldVal, didUpdate := t.insert(child, k, search, v) if newChild != nil { - nc := t.writeNode(n) + nc := t.writeNode(n, false) nc.edges[idx].node = newChild return nc, oldVal, didUpdate } @@ -145,9 +287,10 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface } // Split the node - nc := t.writeNode(n) + nc := t.writeNode(n, false) splitNode := &Node{ - prefix: search[:commonPrefix], + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], } nc.replaceEdge(edge{ label: search[0], @@ -155,7 +298,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface }) // Restore the existing child 
node
-	modChild := t.writeNode(child)
+	modChild := t.writeNode(child, false)
 	splitNode.addEdge(edge{
 		label: modChild.prefix[commonPrefix],
 		node:  modChild,
@@ -164,8 +307,9 @@
 	// Create a new leaf node
 	leaf := &leafNode{
-		key: k,
-		val: v,
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
 	}
 
 	// If the new key is a subset, add to this node
@@ -179,8 +323,9 @@
 	splitNode.addEdge(edge{
 		label: search[0],
 		node: &Node{
-			leaf:   leaf,
-			prefix: search,
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
 		},
 	})
 	return nc, nil, false
@@ -188,19 +333,19 @@
 // delete does a recursive deletion
 func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
-	// Check for key exhaution
+	// Check for key exhaustion
 	if len(search) == 0 {
 		if !n.isLeaf() {
 			return nil, nil
 		}
 
 		// Remove the leaf node
-		nc := t.writeNode(n)
+		nc := t.writeNode(n, true)
 		nc.leaf = nil
 
 		// Check if this node should be merged
 		if n != t.root && len(nc.edges) == 1 {
-			nc.mergeChild()
+			t.mergeChild(nc)
 		}
 		return nc, n.leaf
 	}
@@ -219,14 +364,17 @@
 		return nil, nil
 	}
 
-	// Copy this node
-	nc := t.writeNode(n)
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
 
 	// Delete the edge if the node has no edges
 	if newChild.leaf == nil && len(newChild.edges) == 0 {
 		nc.delEdge(label)
 		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
-			nc.mergeChild()
+			t.mergeChild(nc)
 		}
 	} else {
 		nc.edges[idx].node = newChild
@@ -234,6 +382,56 @@
 	return nc, leaf
 }
 
+// deletePrefix does a recursive deletion of the subtree under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
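+	// mergeChild is now a method on the transaction: t.mergeChild(nc)
+	// tracks the abandoned child's mutate channel before collapsing it
+	// into its parent.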
+ + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + // Insert is used to add or update a given key. The return provides // the previous value and a bool indicating if any was set. func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { @@ -261,6 +459,19 @@ func (t *Txn) Delete(k []byte) (interface{}, bool) { return nil, false } +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + // Root returns the current root of the radix tree within this // transaction. The root is not safe across insert and delete operations, // but can be used to read the current state during a transaction. @@ -274,10 +485,115 @@ func (t *Txn) Get(k []byte) (interface{}, bool) { return t.root.Get(k) } -// Commit is used to finalize the transaction and return a new tree +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. func (t *Txn) Commit() *Tree { - t.modified = nil - return &Tree{t.root, t.size} + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. 
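+		// A node that exists only in the new tree has no old watchers, so
+		// there is nothing to close; just advance the new-tree iterator.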
+ if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false } // Insert is used to add or update a given key. The return provides @@ -296,6 +612,14 @@ func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { return txn.Commit(), old, ok } +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + // Root returns the root node of the tree which can be used for richer // query operations. func (t *Tree) Root() *Node { diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go index 75cbaa110f..9815e02538 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -9,11 +9,13 @@ type Iterator struct { stack []edges } -// SeekPrefix is used to seek the iterator to a given prefix -func (i *Iterator) SeekPrefix(prefix []byte) { +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { // Wipe the stack i.stack = nil n := i.node + watch = n.mutateCh search := prefix for { // Check for key exhaution @@ -29,6 +31,9 @@ func (i *Iterator) SeekPrefix(prefix []byte) { return } + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] @@ -43,6 +48,11 @@ func (i *Iterator) SeekPrefix(prefix []byte) { } } +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + // Next returns the next node in order func (i *Iterator) Next() ([]byte, interface{}, bool) { // Initialize our stack if needed diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go index fea6f63436..ef494fa7ca 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/node.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -12,8 +12,9 @@ type WalkFn func(k []byte, v interface{}) bool // leafNode is used to represent a value type leafNode struct { - key []byte - val interface{} + mutateCh chan struct{} + key []byte + val interface{} } // edge is used to represent an edge node @@ -24,6 
+25,9 @@ type edge struct { // Node is an immutable node in the radix tree type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + // leaf is used to store possible leaf leaf *leafNode @@ -87,31 +91,14 @@ func (n *Node) delEdge(label byte) { } } -func (n *Node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = concat(n.prefix, child.prefix) - if child.leaf != nil { - n.leaf = new(leafNode) - *n.leaf = *child.leaf - } else { - n.leaf = nil - } - if len(child.edges) != 0 { - n.edges = make([]edge, len(child.edges)) - copy(n.edges, child.edges) - } else { - n.edges = nil - } -} - -func (n *Node) Get(k []byte) (interface{}, bool) { +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { search := k + watch := n.mutateCh for { - // Check for key exhaution + // Check for key exhaustion if len(search) == 0 { if n.isLeaf() { - return n.leaf.val, true + return n.leaf.mutateCh, n.leaf.val, true } break } @@ -122,6 +109,9 @@ func (n *Node) Get(k []byte) (interface{}, bool) { break } + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + // Consume the search prefix if bytes.HasPrefix(search, n.prefix) { search = search[len(n.prefix):] @@ -129,7 +119,12 @@ func (n *Node) Get(k []byte) (interface{}, bool) { break } } - return nil, false + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok } // LongestPrefix is like Get, but instead of an @@ -204,6 +199,14 @@ func (n *Node) Iterator() *Iterator { return &Iterator{node: n} } +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node) rawIterator() *rawIterator { + iter := &rawIterator{node: n} + iter.Next() + return iter +} + // Walk is used to walk the tree func (n *Node) Walk(fn WalkFn) { recursiveWalk(n, fn) @@ -271,6 +274,66 @@ func (n *Node) WalkPath(path []byte, fn WalkFn) { } } +func (n *Node) Seek(prefix []byte) *Seeker { + search := prefix + p := &pos{n: n} + for { + // Check for key exhaution + if len(search) == 0 { + return &Seeker{p} + } + + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= search[0] + }) + p.current = idx + if idx < len(n.edges) { + n = n.edges[idx].node + if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 { + search = search[len(n.prefix):] + p.current++ + p = &pos{n: n, prev: p} + continue + } + } + p.current++ + return &Seeker{p} + } +} + +type Seeker struct { + *pos +} + +type pos struct { + n *Node + current int + prev *pos + isLeaf bool +} + +func (s *Seeker) Next() (k []byte, v interface{}, ok bool) { + if s.current >= len(s.n.edges) { + if s.prev == nil { + return nil, nil, false + } + s.pos = s.prev + return s.Next() + } + + edge := s.n.edges[s.current] + s.current++ + if edge.node.leaf != nil && !s.isLeaf { + s.isLeaf = true + s.current-- + return edge.node.leaf.key, edge.node.leaf.val, true + } + s.isLeaf = false + s.pos = &pos{n: edge.node, prev: s.pos} + return s.Next() +} + // recursiveWalk is used to do a pre-order walk of a node // recursively. 
Returns true if the walk should be aborted func recursiveWalk(n *Node, fn WalkFn) bool { diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 0000000000..04814c1323 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 0000000000..a3866a291f --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 0000000000..28ce45a3e1 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,65 @@ +# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + +```go +type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} +} + +v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, +} + +hash, err := hashstructure.Hash(v, nil) +if err != nil { + panic(err) +} + +fmt.Printf("%d", hash) +// Output: +// 2307517237273902113 +``` diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 0000000000..ea13a1583c --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,358 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string + + // ZeroNil is flag determining if nil pointer should be treated equal + // to a zero value of pointed type. By default this is false. + ZeroNil bool +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. 
See HashOptions +// for the default values. The same *HashOptions value cannot be used +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" or "-" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string + zeronil bool +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + t := reflect.TypeOf(0) + + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + if w.zeronil { + t = v.Type().Elem() + } + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + v = reflect.Zero(t) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. 
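+		// For example, {"a": 1, "b": 2} hashes to
+		//   H(H("a"), H(1)) ^ H(H("b"), H(2))
+		// no matter what order MapKeys returns the keys in.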
+ var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + parent := v.Interface() + var include Includable + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" || tag == "-" { + // Ignore this field + continue + } + + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, innerV) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(innerV, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. + var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. 
+ e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 0000000000..b6289c0bee --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go new file mode 100644 index 0000000000..e92bd7f04d --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -0,0 +1,4871 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: control.proto + +/* + Package moby_buildkit_v1 is a generated protocol buffer package. + + It is generated from these files: + control.proto + + It has these top-level messages: + PruneRequest + DiskUsageRequest + DiskUsageResponse + UsageRecord + SolveRequest + CacheOptions + SolveResponse + StatusRequest + StatusResponse + Vertex + VertexStatus + VertexLog + BytesMessage + ListWorkersRequest + ListWorkersResponse + WorkerRecord +*/ +package moby_buildkit_v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import pb "github.com/moby/buildkit/solver/pb" + +import time "time" +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PruneRequest struct { +} + +func (m *PruneRequest) Reset() { *m = PruneRequest{} } +func (m *PruneRequest) String() string { return proto.CompactTextString(m) } +func (*PruneRequest) ProtoMessage() {} +func (*PruneRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type DiskUsageRequest struct { + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} } +func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) } +func (*DiskUsageRequest) ProtoMessage() {} +func (*DiskUsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +func (m *DiskUsageRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type DiskUsageResponse struct { + Record []*UsageRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` +} + +func (m *DiskUsageResponse) Reset() { *m = DiskUsageResponse{} } +func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) } +func (*DiskUsageResponse) ProtoMessage() {} +func (*DiskUsageResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +func (m *DiskUsageResponse) GetRecord() []*UsageRecord { + if m != nil { + return m.Record + } + return nil +} + +type UsageRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` + InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` + Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` + Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` + CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,stdtime" json:"CreatedAt"` + LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,stdtime" json:"LastUsedAt,omitempty"` + UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` + Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` +} + +func (m *UsageRecord) Reset() { *m = UsageRecord{} } +func (m *UsageRecord) String() string { return proto.CompactTextString(m) } +func (*UsageRecord) ProtoMessage() {} +func (*UsageRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +func (m *UsageRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *UsageRecord) GetMutable() bool { + if m != nil { + return m.Mutable + } + return false +} + +func (m *UsageRecord) GetInUse() bool { + if m != nil { + return m.InUse + } + return false +} + +func (m *UsageRecord) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *UsageRecord) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *UsageRecord) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *UsageRecord) GetLastUsedAt() *time.Time { + if m != nil { + return m.LastUsedAt + } + return nil +} + +func (m *UsageRecord) GetUsageCount() int64 { + if m != nil { + return m.UsageCount + } + return 0 +} + +func (m *UsageRecord) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type SolveRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + 
Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition" json:"Definition,omitempty"` + Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` + ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` + Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache" json:"Cache"` +} + +func (m *SolveRequest) Reset() { *m = SolveRequest{} } +func (m *SolveRequest) String() string { return proto.CompactTextString(m) } +func (*SolveRequest) ProtoMessage() {} +func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +func (m *SolveRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *SolveRequest) GetDefinition() *pb.Definition { + if m != nil { + return m.Definition + } + return nil +} + +func (m *SolveRequest) GetExporter() string { + if m != nil { + return m.Exporter + } + return "" +} + +func (m *SolveRequest) GetExporterAttrs() map[string]string { + if m != nil { + return m.ExporterAttrs + } + return nil +} + +func (m *SolveRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *SolveRequest) GetFrontend() string { + if m != nil { + return m.Frontend + } + return "" +} + +func (m *SolveRequest) GetFrontendAttrs() map[string]string { + if m != nil { + return m.FrontendAttrs + } + return nil +} + +func (m *SolveRequest) GetCache() CacheOptions { + if m != nil { + return m.Cache + } + return CacheOptions{} +} + +type CacheOptions struct { + ExportRef string `protobuf:"bytes,1,opt,name=ExportRef,proto3" json:"ExportRef,omitempty"` + ImportRefs []string `protobuf:"bytes,2,rep,name=ImportRefs" json:"ImportRefs,omitempty"` + ExportAttrs map[string]string `protobuf:"bytes,3,rep,name=ExportAttrs" json:"ExportAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *CacheOptions) Reset() { *m = CacheOptions{} } +func (m *CacheOptions) String() string { return proto.CompactTextString(m) } +func (*CacheOptions) ProtoMessage() {} +func (*CacheOptions) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +func (m *CacheOptions) GetExportRef() string { + if m != nil { + return m.ExportRef + } + return "" +} + +func (m *CacheOptions) GetImportRefs() []string { + if m != nil { + return m.ImportRefs + } + return nil +} + +func (m *CacheOptions) GetExportAttrs() map[string]string { + if m != nil { + return m.ExportAttrs + } + return nil +} + +type SolveResponse struct { + ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SolveResponse) Reset() { *m = SolveResponse{} } +func (m *SolveResponse) String() string { return proto.CompactTextString(m) } +func (*SolveResponse) ProtoMessage() {} +func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +func (m *SolveResponse) 
GetExporterResponse() map[string]string { + if m != nil { + return m.ExporterResponse + } + return nil +} + +type StatusRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +func (m *StatusRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +type StatusResponse struct { + Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes" json:"vertexes,omitempty"` + Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses" json:"statuses,omitempty"` + Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs" json:"logs,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +func (m *StatusResponse) GetVertexes() []*Vertex { + if m != nil { + return m.Vertexes + } + return nil +} + +func (m *StatusResponse) GetStatuses() []*VertexStatus { + if m != nil { + return m.Statuses + } + return nil +} + +func (m *StatusResponse) GetLogs() []*VertexLog { + if m != nil { + return m.Logs + } + return nil +} + +type Vertex struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` + Started *time.Time `protobuf:"bytes,5,opt,name=started,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,6,opt,name=completed,stdtime" json:"completed,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *Vertex) Reset() { *m = Vertex{} } +func (m *Vertex) String() string { return proto.CompactTextString(m) } +func (*Vertex) ProtoMessage() {} +func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +func (m *Vertex) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Vertex) GetCached() bool { + if m != nil { + return m.Cached + } + return false +} + +func (m *Vertex) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *Vertex) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +func (m *Vertex) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type VertexStatus struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // TODO: add started, completed + Timestamp 
time.Time `protobuf:"bytes,6,opt,name=timestamp,stdtime" json:"timestamp"` + Started *time.Time `protobuf:"bytes,7,opt,name=started,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,8,opt,name=completed,stdtime" json:"completed,omitempty"` +} + +func (m *VertexStatus) Reset() { *m = VertexStatus{} } +func (m *VertexStatus) String() string { return proto.CompactTextString(m) } +func (*VertexStatus) ProtoMessage() {} +func (*VertexStatus) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +func (m *VertexStatus) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *VertexStatus) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VertexStatus) GetCurrent() int64 { + if m != nil { + return m.Current + } + return 0 +} + +func (m *VertexStatus) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VertexStatus) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexStatus) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *VertexStatus) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +type VertexLog struct { + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,stdtime" json:"timestamp"` + Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` + Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *VertexLog) Reset() { *m = VertexLog{} } +func (m *VertexLog) String() string { return proto.CompactTextString(m) } +func (*VertexLog) ProtoMessage() {} +func (*VertexLog) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +func (m *VertexLog) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexLog) GetStream() int64 { + if m != nil { + return m.Stream + } + return 0 +} + +func (m *VertexLog) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (m *BytesMessage) String() string { return proto.CompactTextString(m) } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ListWorkersRequest struct { + Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` +} + +func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } +func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } +func (*ListWorkersRequest) ProtoMessage() {} +func (*ListWorkersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +func (m *ListWorkersRequest) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +type ListWorkersResponse struct { + Record []*WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` +} + +func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } +func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } +func 
(*ListWorkersResponse) ProtoMessage() {} +func (*ListWorkersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +func (m *ListWorkersResponse) GetRecord() []*WorkerRecord { + if m != nil { + return m.Record + } + return nil +} + +type WorkerRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *WorkerRecord) Reset() { *m = WorkerRecord{} } +func (m *WorkerRecord) String() string { return proto.CompactTextString(m) } +func (*WorkerRecord) ProtoMessage() {} +func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +func (m *WorkerRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *WorkerRecord) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") + proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") + proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") + proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") + proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") + proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") + proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") + proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") + proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") + proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") + proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") + proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") + proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.WorkerRecord") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
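// ---- editorial aside: illustrative sketch, not part of the generated file ----
// init() above registers each message under its fully-qualified name in the
// moby.buildkit.v1 package; the same prefix reappears in the gRPC method paths
// below ("/moby.buildkit.v1.Control/Solve"). Registration also makes the types
// resolvable by name at runtime through the proto package this file imports:
func exampleLookupRegisteredType() {
	// Returns the reflect.Type for SolveRequest, or nil if it was never registered.
	_ = proto.MessageType("moby.buildkit.v1.SolveRequest")
}
// ---- end aside ----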
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Control service + +type ControlClient interface { + DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) + Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) + Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { + out := new(DiskUsageResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[0], c.cc, "/moby.buildkit.v1.Control/Prune", opts...) + if err != nil { + return nil, err + } + x := &controlPruneClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_PruneClient interface { + Recv() (*UsageRecord, error) + grpc.ClientStream +} + +type controlPruneClient struct { + grpc.ClientStream +} + +func (x *controlPruneClient) Recv() (*UsageRecord, error) { + m := new(UsageRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[1], c.cc, "/moby.buildkit.v1.Control/Status", opts...) + if err != nil { + return nil, err + } + x := &controlStatusClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_StatusClient interface { + Recv() (*StatusResponse, error) + grpc.ClientStream +} + +type controlStatusClient struct { + grpc.ClientStream +} + +func (x *controlStatusClient) Recv() (*StatusResponse, error) { + m := new(StatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[2], c.cc, "/moby.buildkit.v1.Control/Session", opts...) 
+ if err != nil { + return nil, err + } + x := &controlSessionClient{stream} + return x, nil +} + +type Control_SessionClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type controlSessionClient struct { + grpc.ClientStream +} + +func (x *controlSessionClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *controlSessionClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Control service + +type ControlServer interface { + DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) + Prune(*PruneRequest, Control_PruneServer) error + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + Status(*StatusRequest, Control_StatusServer) error + Session(Control_SessionServer) error + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiskUsageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).DiskUsage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/DiskUsage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PruneRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).Prune(m, &controlPruneServer{stream}) +} + +type Control_PruneServer interface { + Send(*UsageRecord) error + grpc.ServerStream +} + +type controlPruneServer struct { + grpc.ServerStream +} + +func (x *controlPruneServer) Send(m *UsageRecord) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatusRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).Status(m, &controlStatusServer{stream}) +} + +type Control_StatusServer interface { + Send(*StatusResponse) error + grpc.ServerStream +} + +type controlStatusServer struct 
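// ---- editorial aside: illustrative sketch, not part of the generated file ----
// Status is a server-streaming RPC: the daemon keeps sending StatusResponse
// progress updates for a build ref until the stream ends, which surfaces as
// io.EOF from Recv. A minimal consumer, assuming the context and io imports
// already used by this file:
func exampleDrainStatus(ctx context.Context, c ControlClient, ref string) error {
	stream, err := c.Status(ctx, &StatusRequest{Ref: ref})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // stream closed cleanly: no more progress updates
		}
		if err != nil {
			return err
		}
		_ = resp.GetVertexes() // vertices whose state changed in this update
	}
}
// ---- end aside ----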
{ + grpc.ServerStream +} + +func (x *controlStatusServer) Send(m *StatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ControlServer).Session(&controlSessionServer{stream}) +} + +type Control_SessionServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type controlSessionServer struct { + grpc.ServerStream +} + +func (x *controlSessionServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *controlSessionServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/ListWorkers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DiskUsage", + Handler: _Control_DiskUsage_Handler, + }, + { + MethodName: "Solve", + Handler: _Control_Solve_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _Control_ListWorkers_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Prune", + Handler: _Control_Prune_Handler, + ServerStreams: true, + }, + { + StreamName: "Status", + Handler: _Control_Status_Handler, + ServerStreams: true, + }, + { + StreamName: "Session", + Handler: _Control_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "control.proto", +} + +func (m *PruneRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter))) + i += copy(dAtA[i:], m.Filter) + } + return i, nil +} + +func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Record) > 0 { + for _, msg := range m.Record { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UsageRecord) 
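// ---- editorial aside: illustrative sketch, not part of the generated file ----
// The literal bytes written by the MarshalTo methods below are protobuf field
// tags: (fieldNumber << 3) | wireType, where wire type 0 is varint and 2 is
// length-delimited. So UsageRecord's string ID (field 1) gets 1<<3|2 = 0x0a and
// its varint Size_ (field 4) gets 4<<3|0 = 0x20 — exactly the constants in the
// generated code. Map fields are encoded as repeated nested messages whose key
// is field 1 and value is field 2, which is where the mapSize arithmetic in
// the map-encoding loops comes from.
func exampleWireTag(fieldNum, wireType uint64) byte {
	return byte(fieldNum<<3 | wireType) // e.g. exampleWireTag(5, 2) == 0x2a
}
// ---- end aside ----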
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Mutable { + dAtA[i] = 0x10 + i++ + if m.Mutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.InUse { + dAtA[i] = 0x18 + i++ + if m.InUse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Size_ != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) + } + if len(m.Parent) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) + i += copy(dAtA[i:], m.Parent) + } + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt))) + n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.LastUsedAt != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.LastUsedAt))) + n2, err := types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UsageCount != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) + } + if len(m.Description) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if m.Definition != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Definition.Size())) + n3, err := m.Definition.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Exporter) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) + i += copy(dAtA[i:], m.Exporter) + } + if len(m.ExporterAttrs) > 0 { + for k, _ := range m.ExporterAttrs { + dAtA[i] = 0x22 + i++ + v := m.ExporterAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Session) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Session))) + i += copy(dAtA[i:], m.Session) + } + if len(m.Frontend) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i += copy(dAtA[i:], m.Frontend) + } + if len(m.FrontendAttrs) > 0 { + for k, _ := range m.FrontendAttrs { + dAtA[i] = 0x3a + i++ + v := m.FrontendAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + 
i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cache.Size())) + n4, err := m.Cache.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *CacheOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExportRef) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRef))) + i += copy(dAtA[i:], m.ExportRef) + } + if len(m.ImportRefs) > 0 { + for _, s := range m.ImportRefs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ExportAttrs) > 0 { + for k, _ := range m.ExportAttrs { + dAtA[i] = 0x1a + i++ + v := m.ExportAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, _ := range m.ExporterResponse { + dAtA[i] = 0xa + i++ + v := m.ExporterResponse[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, msg := range m.Vertexes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Logs) > 0 { + for _, msg := range 
m.Logs { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Vertex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Name) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Cached { + dAtA[i] = 0x20 + i++ + if m.Cached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Started != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started))) + n5, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Completed != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed))) + n6, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if len(m.Error) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *VertexStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Vertex) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i += copy(dAtA[i:], m.Vertex) + } + if len(m.Name) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Current != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Current)) + } + if m.Total != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Total)) + } + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp))) + n7, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.Started != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started))) + n8, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Completed != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed))) + n9, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *VertexLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Vertex) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i += copy(dAtA[i:], m.Vertex) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp))) + n10, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Stream)) + } + if len(m.Msg) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) + i += copy(dAtA[i:], m.Msg) + } + return i, nil +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Record) > 0 { + for _, msg := range m.Record { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PruneRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DiskUsageRequest) Size() (n int) { + var l 
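// ---- editorial aside: illustrative sketch, not part of the generated file ----
// encodeVarintControl above writes standard base-128 varints: seven payload
// bits per byte, with the high bit set on every byte except the last. Worked
// example: 300 = 0b1_0010_1100 encodes as the two bytes 0xAC 0x02.
func exampleVarint() []byte {
	buf := make([]byte, 2)
	n := encodeVarintControl(buf, 0, 300) // writes 0xAC, 0x02; returns 2
	return buf[:n]
}
// ---- end aside ----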
int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *DiskUsageResponse) Size() (n int) { + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UsageRecord) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Mutable { + n += 2 + } + if m.InUse { + n += 2 + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + l = len(m.Parent) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + if m.LastUsedAt != nil { + l = types.SizeOfStdTime(*m.LastUsedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.UsageCount != 0 { + n += 1 + sovControl(uint64(m.UsageCount)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Exporter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterAttrs) > 0 { + for k, v := range m.ExporterAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = len(m.Session) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = m.Cache.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *CacheOptions) Size() (n int) { + var l int + _ = l + l = len(m.ExportRef) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ImportRefs) > 0 { + for _, s := range m.ImportRefs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.ExportAttrs) > 0 { + for k, v := range m.ExportAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SolveResponse) Size() (n int) { + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, e := range m.Vertexes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *Vertex) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l 
+ sovControl(uint64(l)) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Cached { + n += 2 + } + if m.Started != nil { + l = types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *VertexStatus) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Current != 0 { + n += 1 + sovControl(uint64(m.Current)) + } + if m.Total != 0 { + n += 1 + sovControl(uint64(m.Total)) + } + l = types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Started != nil { + l = types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *VertexLog) Size() (n int) { + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Stream != 0 { + n += 1 + sovControl(uint64(m.Stream)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *BytesMessage) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListWorkersRequest) Size() (n int) { + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListWorkersResponse) Size() (n int) { + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *WorkerRecord) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PruneRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy 
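// ---- editorial aside: illustrative sketch, not part of the generated file ----
// sovControl above is the size counterpart of encodeVarintControl (e.g.
// sovControl(300) == 2), and sozControl applies zigzag first so small negative
// values stay short: sozControl(uint64(int64(-1))) == sovControl(1) == 1. The
// Unmarshal methods that follow invert the tag math — wire >> 3 recovers the
// field number, wire & 0x7 the wire type — and route unknown fields through
// skipControl instead of failing, which keeps older parsers tolerant of newer
// messages. Splitting a single tag byte, for illustration:
func exampleSplitTag(tag byte) (fieldNum int32, wireType int) {
	return int32(tag >> 3), int(tag & 0x7) // 0x2a -> field 5, wire type 2
}
// ---- end aside ----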
< 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &UsageRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*UsageRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mutable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InUse = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastUsedAt == nil { + m.LastUsedAt = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) + } + m.UsageCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UsageCount |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exporter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterAttrs == nil { + m.ExporterAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + 
skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Session = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return 
ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportRefs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportRefs = append(m.ImportRefs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ExportAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportAttrs == nil { + m.ExportAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExportAttrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ExporterResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
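+ // (Editor's annotation, not part of the generated file:) every length-delimited
+ // field case in these generated Unmarshal functions repeats one shape: decode a
+ // base-128 varint tag, verify the wire type, decode a varint length, bounds-check
+ // postIndex against len(dAtA), then slice the field's bytes out of dAtA.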
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertexes = append(m.Vertexes, &Vertex{}) + if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, &VertexStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &VertexLog{}) + if err := 
m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vertex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vertex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Cached = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + 
return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) + if m.Msg == nil { + m.Msg = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &WorkerRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0x66, 0x6c, 0xc7, 0x3f, 0x65, 0x27, 0x0a, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00, 0xc9, + 0x8a, 0x76, 0xc7, 0xd9, 0xc0, 0x22, 0xc8, 0x61, 0xb5, 0xeb, 0x78, 0x11, 0x89, 0x12, 0xb1, 0x74, + 0x36, 0xac, 0xc4, 0x6d, 0x6c, 0x77, 0xbc, 0xa3, 0xd8, 0xd3, 0xa6, 0xbb, 0x27, 0xda, 0xf0, 0x14, + 0x1c, 0xb8, 0xf2, 0x14, 0x1c, 0x38, 0x73, 0x40, 0xda, 0x23, 0x67, 0x0e, 0x59, 0x94, 0x3b, 0x3c, + 0x03, 0xea, 0x9f, 0xb1, 0xdb, 0x1e, 0xe7, 0xc7, 0xd9, 0x53, 0xba, 0x3a, 0x5f, 0x7d, 0x53, 0x5d, + 0x5f, 0xb9, 0xaa, 0x60, 0xb9, 0x4b, 0x63, 0xc1, 
0xe8, 0x20, 0x18, 0x31, 0x2a, 0x28, 0x5a, 0x1d, + 0xd2, 0xce, 0x59, 0xd0, 0x49, 0xa2, 0x41, 0xef, 0x24, 0x12, 0xc1, 0xe9, 0x7d, 0xef, 0x5e, 0x3f, + 0x12, 0x2f, 0x92, 0x4e, 0xd0, 0xa5, 0xc3, 0x66, 0x9f, 0xf6, 0x69, 0x53, 0x01, 0x3b, 0xc9, 0xb1, + 0xb2, 0x94, 0xa1, 0x4e, 0x9a, 0xc0, 0x5b, 0xef, 0x53, 0xda, 0x1f, 0x90, 0x09, 0x4a, 0x44, 0x43, + 0xc2, 0x45, 0x38, 0x1c, 0x19, 0xc0, 0x5d, 0x8b, 0x4f, 0x7e, 0xac, 0x99, 0x7e, 0xac, 0xc9, 0xe9, + 0xe0, 0x94, 0xb0, 0xe6, 0xa8, 0xd3, 0xa4, 0x23, 0xae, 0xd1, 0xfe, 0x0a, 0xd4, 0x9e, 0xb2, 0x24, + 0x26, 0x98, 0xfc, 0x98, 0x10, 0x2e, 0xfc, 0x0d, 0x58, 0x6d, 0x47, 0xfc, 0xe4, 0x88, 0x87, 0xfd, + 0xf4, 0x0e, 0xdd, 0x81, 0xe2, 0x71, 0x34, 0x10, 0x84, 0xb9, 0x4e, 0xdd, 0x69, 0x54, 0xb0, 0xb1, + 0xfc, 0x3d, 0x78, 0xdb, 0xc2, 0xf2, 0x11, 0x8d, 0x39, 0x41, 0x0f, 0xa0, 0xc8, 0x48, 0x97, 0xb2, + 0x9e, 0xeb, 0xd4, 0xf3, 0x8d, 0xea, 0xd6, 0x87, 0xc1, 0xec, 0x8b, 0x03, 0xe3, 0x20, 0x41, 0xd8, + 0x80, 0xfd, 0x3f, 0x72, 0x50, 0xb5, 0xee, 0xd1, 0x0a, 0xe4, 0x76, 0xdb, 0xe6, 0x7b, 0xb9, 0xdd, + 0x36, 0x72, 0xa1, 0x74, 0x90, 0x88, 0xb0, 0x33, 0x20, 0x6e, 0xae, 0xee, 0x34, 0xca, 0x38, 0x35, + 0xd1, 0xbb, 0xb0, 0xb4, 0x1b, 0x1f, 0x71, 0xe2, 0xe6, 0xd5, 0xbd, 0x36, 0x10, 0x82, 0xc2, 0x61, + 0xf4, 0x13, 0x71, 0x0b, 0x75, 0xa7, 0x91, 0xc7, 0xea, 0x2c, 0xdf, 0xf1, 0x34, 0x64, 0x24, 0x16, + 0xee, 0x92, 0x7e, 0x87, 0xb6, 0x50, 0x0b, 0x2a, 0x3b, 0x8c, 0x84, 0x82, 0xf4, 0x1e, 0x0b, 0xb7, + 0x58, 0x77, 0x1a, 0xd5, 0x2d, 0x2f, 0xd0, 0x69, 0x0e, 0xd2, 0x34, 0x07, 0xcf, 0xd2, 0x34, 0xb7, + 0xca, 0xaf, 0xce, 0xd7, 0xdf, 0xfa, 0xf9, 0xf5, 0xba, 0x83, 0x27, 0x6e, 0xe8, 0x11, 0xc0, 0x7e, + 0xc8, 0xc5, 0x11, 0x57, 0x24, 0xa5, 0x6b, 0x49, 0x0a, 0x8a, 0xc0, 0xf2, 0x41, 0x6b, 0x00, 0x2a, + 0x01, 0x3b, 0x34, 0x89, 0x85, 0x5b, 0x56, 0x71, 0x5b, 0x37, 0xa8, 0x0e, 0xd5, 0x36, 0xe1, 0x5d, + 0x16, 0x8d, 0x44, 0x44, 0x63, 0xb7, 0xa2, 0x9e, 0x60, 0x5f, 0xf9, 0xbf, 0x14, 0xa0, 0x76, 0x28, + 0x35, 0x4e, 0x85, 0x5b, 0x85, 0x3c, 0x26, 0xc7, 0x26, 0x8b, 0xf2, 0x88, 0x02, 0x80, 0x36, 0x39, + 0x8e, 0xe2, 0x48, 0x71, 0xe4, 0x54, 0x98, 0x2b, 0xc1, 0xa8, 0x13, 0x4c, 0x6e, 0xb1, 0x85, 0x40, + 0x1e, 0x94, 0x9f, 0xbc, 0x1c, 0x51, 0x26, 0xc5, 0xcf, 0x2b, 0x9a, 0xb1, 0x8d, 0x9e, 0xc3, 0x72, + 0x7a, 0x7e, 0x2c, 0x04, 0xe3, 0x6e, 0x41, 0x09, 0x7e, 0x3f, 0x2b, 0xb8, 0x1d, 0x54, 0x30, 0xe5, + 0xf3, 0x24, 0x16, 0xec, 0x0c, 0x4f, 0xf3, 0x48, 0xad, 0x0f, 0x09, 0xe7, 0x32, 0x42, 0x2d, 0x54, + 0x6a, 0xca, 0x70, 0xbe, 0x66, 0x34, 0x16, 0x24, 0xee, 0x29, 0xa1, 0x2a, 0x78, 0x6c, 0xcb, 0x70, + 0xd2, 0xb3, 0x0e, 0xa7, 0x74, 0xa3, 0x70, 0xa6, 0x7c, 0x4c, 0x38, 0x53, 0x77, 0x68, 0x1b, 0x96, + 0x76, 0xc2, 0xee, 0x0b, 0xa2, 0x34, 0xa9, 0x6e, 0xad, 0x65, 0x09, 0xd5, 0xbf, 0xbf, 0x55, 0x22, + 0xf0, 0x56, 0x41, 0x96, 0x07, 0xd6, 0x2e, 0xde, 0x23, 0x40, 0xd9, 0xf7, 0x4a, 0x5d, 0x4e, 0xc8, + 0x59, 0xaa, 0xcb, 0x09, 0x39, 0x93, 0x45, 0x7c, 0x1a, 0x0e, 0x12, 0x5d, 0xdc, 0x15, 0xac, 0x8d, + 0xed, 0xdc, 0x97, 0x8e, 0x64, 0xc8, 0x86, 0xb8, 0x08, 0x83, 0xff, 0xda, 0x81, 0x9a, 0x1d, 0x21, + 0xfa, 0x00, 0x2a, 0x3a, 0xa8, 0x49, 0x71, 0x4c, 0x2e, 0x64, 0x1d, 0xee, 0x0e, 0x8d, 0xc1, 0xdd, + 0x5c, 0x3d, 0xdf, 0xa8, 0x60, 0xeb, 0x06, 0x7d, 0x07, 0x55, 0x0d, 0xd6, 0x59, 0xce, 0xab, 0x2c, + 0x37, 0xaf, 0x4e, 0x4a, 0x60, 0x79, 0xe8, 0x1c, 0xdb, 0x1c, 0xde, 0x43, 0x58, 0x9d, 0x05, 0x2c, + 0xf4, 0xc2, 0xdf, 0x1d, 0x58, 0x36, 0xa2, 0x9a, 0x2e, 0x14, 0xa6, 0x8c, 0x84, 0xa5, 0x77, 0xa6, + 0x1f, 0x3d, 0xb8, 0xb4, 0x1e, 0x34, 0x2c, 0x98, 0xf5, 0xd3, 0xf1, 0x66, 0xe8, 0xbc, 0x1d, 0x78, + 0x6f, 0x2e, 0x74, 0xa1, 0xc8, 0x3f, 0x82, 0xe5, 0x43, 0x11, 0x8a, 0x84, 
0x5f, 0xfa, 0x93, 0xf5, + 0x7f, 0x73, 0x60, 0x25, 0xc5, 0x98, 0xd7, 0x7d, 0x0e, 0xe5, 0x53, 0xc2, 0x04, 0x79, 0x49, 0xb8, + 0x79, 0x95, 0x9b, 0x7d, 0xd5, 0xf7, 0x0a, 0x81, 0xc7, 0x48, 0xb4, 0x0d, 0x65, 0xae, 0x78, 0x88, + 0x96, 0x75, 0x6e, 0x29, 0x6b, 0x2f, 0xf3, 0xbd, 0x31, 0x1e, 0x35, 0xa1, 0x30, 0xa0, 0xfd, 0x54, + 0xed, 0xf7, 0x2f, 0xf3, 0xdb, 0xa7, 0x7d, 0xac, 0x80, 0xfe, 0x79, 0x0e, 0x8a, 0xfa, 0x0e, 0xed, + 0x41, 0xb1, 0x17, 0xf5, 0x09, 0x17, 0xfa, 0x55, 0xad, 0x2d, 0xf9, 0x03, 0xf9, 0xfb, 0x7c, 0x7d, + 0xc3, 0x1a, 0x54, 0x74, 0x44, 0x62, 0x39, 0x28, 0xc3, 0x28, 0x26, 0x8c, 0x37, 0xfb, 0xf4, 0x9e, + 0x76, 0x09, 0xda, 0xea, 0x0f, 0x36, 0x0c, 0x92, 0x2b, 0x8a, 0x47, 0x89, 0x30, 0x85, 0x79, 0x3b, + 0x2e, 0xcd, 0x20, 0x47, 0x44, 0x1c, 0x0e, 0x89, 0xe9, 0x6b, 0xea, 0x2c, 0x47, 0x44, 0x57, 0xd6, + 0x6d, 0x4f, 0x0d, 0x8e, 0x32, 0x36, 0x16, 0xda, 0x86, 0x12, 0x17, 0x21, 0x13, 0xa4, 0xa7, 0x5a, + 0xd2, 0x4d, 0x7a, 0x7b, 0xea, 0x80, 0x1e, 0x42, 0xa5, 0x4b, 0x87, 0xa3, 0x01, 0x91, 0xde, 0xc5, + 0x1b, 0x7a, 0x4f, 0x5c, 0x64, 0xf5, 0x10, 0xc6, 0x28, 0x53, 0x53, 0xa5, 0x82, 0xb5, 0xe1, 0xff, + 0x97, 0x83, 0x9a, 0x2d, 0x56, 0x66, 0x62, 0xee, 0x41, 0x51, 0x4b, 0xaf, 0xab, 0xee, 0x76, 0xa9, + 0xd2, 0x0c, 0x73, 0x53, 0xe5, 0x42, 0xa9, 0x9b, 0x30, 0x35, 0x4e, 0xf5, 0x90, 0x4d, 0x4d, 0x19, + 0xb0, 0xa0, 0x22, 0x1c, 0xa8, 0x54, 0xe5, 0xb1, 0x36, 0xe4, 0x94, 0x1d, 0xaf, 0x2a, 0x8b, 0x4d, + 0xd9, 0xb1, 0x9b, 0x2d, 0x43, 0xe9, 0x8d, 0x64, 0x28, 0x2f, 0x2c, 0x83, 0xff, 0xa7, 0x03, 0x95, + 0x71, 0x95, 0x5b, 0xd9, 0x75, 0xde, 0x38, 0xbb, 0x53, 0x99, 0xc9, 0xdd, 0x2e, 0x33, 0x77, 0xa0, + 0xc8, 0x05, 0x23, 0xe1, 0x50, 0x69, 0x94, 0xc7, 0xc6, 0x92, 0xfd, 0x64, 0xc8, 0xfb, 0x4a, 0xa1, + 0x1a, 0x96, 0x47, 0xdf, 0x87, 0x5a, 0xeb, 0x4c, 0x10, 0x7e, 0x40, 0xb8, 0x5c, 0x2e, 0xa4, 0xb6, + 0xbd, 0x50, 0x84, 0xea, 0x1d, 0x35, 0xac, 0xce, 0xfe, 0x5d, 0x40, 0xfb, 0x11, 0x17, 0xcf, 0x29, + 0x3b, 0x21, 0x8c, 0xcf, 0xdb, 0x03, 0xf3, 0xd6, 0x1e, 0x78, 0x00, 0xef, 0x4c, 0xa1, 0x4d, 0x97, + 0xfa, 0x62, 0x66, 0x13, 0x9c, 0xd3, 0x6d, 0xb4, 0xcb, 0xcc, 0x2a, 0xf8, 0xab, 0x03, 0x35, 0xfb, + 0x1f, 0x99, 0xca, 0x6e, 0x41, 0x71, 0x3f, 0xec, 0x90, 0x41, 0xda, 0xc6, 0x36, 0xae, 0x26, 0x0e, + 0x34, 0x58, 0xf7, 0x71, 0xe3, 0xe9, 0x7d, 0x05, 0x55, 0xeb, 0x7a, 0x91, 0x9e, 0xbd, 0xf5, 0x6f, + 0x1e, 0x4a, 0x3b, 0x7a, 0xa9, 0x47, 0xcf, 0xa0, 0x32, 0x5e, 0x81, 0x91, 0x9f, 0x8d, 0x63, 0x76, + 0x97, 0xf6, 0x3e, 0xbe, 0x12, 0x63, 0x32, 0xf7, 0x0d, 0x2c, 0xa9, 0xa5, 0x1c, 0xcd, 0x49, 0x99, + 0xbd, 0xad, 0x7b, 0x57, 0x2f, 0xd7, 0x9b, 0x8e, 0x64, 0x52, 0xd3, 0x6d, 0x1e, 0x93, 0xbd, 0x06, + 0x79, 0xeb, 0xd7, 0x8c, 0x45, 0x74, 0x00, 0x45, 0xd3, 0x68, 0xe6, 0x41, 0xed, 0x19, 0xe6, 0xd5, + 0x2f, 0x07, 0x68, 0xb2, 0x4d, 0x07, 0x1d, 0x8c, 0x77, 0xbc, 0x79, 0xa1, 0xd9, 0x05, 0xea, 0x5d, + 0xf3, 0xff, 0x86, 0xb3, 0xe9, 0xa0, 0x1f, 0xa0, 0x6a, 0x95, 0x20, 0xfa, 0x24, 0xeb, 0x92, 0xad, + 0x67, 0xef, 0xd3, 0x6b, 0x50, 0x3a, 0xd8, 0x56, 0xed, 0xd5, 0xc5, 0x9a, 0xf3, 0xd7, 0xc5, 0x9a, + 0xf3, 0xcf, 0xc5, 0x9a, 0xd3, 0x29, 0xaa, 0x5f, 0xe4, 0x67, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x4d, 0x94, 0x5a, 0xb6, 0xd8, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto new file mode 100644 index 0000000000..7944ce89a8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package moby.buildkit.v1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import 
"google/protobuf/timestamp.proto"; +import "github.com/moby/buildkit/solver/pb/ops.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service Control { + rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse); + rpc Prune(PruneRequest) returns (stream UsageRecord); + rpc Solve(SolveRequest) returns (SolveResponse); + rpc Status(StatusRequest) returns (stream StatusResponse); + rpc Session(stream BytesMessage) returns (stream BytesMessage); + rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); +} + +message PruneRequest { + // TODO: filter +} + +message DiskUsageRequest { + string filter = 1; // FIXME: this should be containerd-compatible repeated string? +} + +message DiskUsageResponse { + repeated UsageRecord record = 1; +} + +message UsageRecord { + string ID = 1; + bool Mutable = 2; + bool InUse = 3; + int64 Size = 4; + string Parent = 5; + google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true]; + int64 UsageCount = 8; + string Description = 9; +} + +message SolveRequest { + string Ref = 1; + pb.Definition Definition = 2; + string Exporter = 3; + map ExporterAttrs = 4; + string Session = 5; + string Frontend = 6; + map FrontendAttrs = 7; + CacheOptions Cache = 8 [(gogoproto.nullable) = false]; +} + +message CacheOptions { + string ExportRef = 1; + repeated string ImportRefs = 2; + map ExportAttrs = 3; +} + +message SolveResponse { + map ExporterResponse = 1; +} + +message StatusRequest { + string Ref = 1; +} + +message StatusResponse { + repeated Vertex vertexes = 1; + repeated VertexStatus statuses = 2; + repeated VertexLog logs = 3; +} + +message Vertex { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 3; + bool cached = 4; + google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ]; + google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ]; + string error = 7; // typed errors? 
+} + +message VertexStatus { + string ID = 1; + string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 3; + int64 current = 4; + int64 total = 5; + // TODO: add started, completed + google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ]; + google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ]; +} + +message VertexLog { + string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + int64 stream = 3; + bytes msg = 4; +} + +message BytesMessage { + bytes data = 1; +} + +message ListWorkersRequest { + repeated string filter = 1; // containerd style +} + +message ListWorkersResponse { + repeated WorkerRecord record = 1; +} + +message WorkerRecord { + string ID = 1; + map<string, string> Labels = 2; +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go new file mode 100644 index 0000000000..1c161155f5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/generate.go @@ -0,0 +1,3 @@ +package moby_buildkit_v1 + +//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go new file mode 100644 index 0000000000..cf89a5f209 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -0,0 +1,634 @@ +package contenthash + +import ( + "bytes" + "context" + "crypto/sha256" + "io" + "os" + "path" + "path/filepath" + "sync" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/locker" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/hashicorp/golang-lru/simplelru" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/snapshot" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +var errNotFound = errors.Errorf("not found") + +var defaultManager *cacheManager +var defaultManagerOnce sync.Once + +const keyContentHash = "buildkit.contenthash.v0" + +func getDefaultManager() *cacheManager { + defaultManagerOnce.Do(func() { + lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size + defaultManager = &cacheManager{lru: lru, locker: locker.New()} + }) + return defaultManager +} + +// Layout in the radix tree: Every path is saved by cleaned absolute unix path. +// Directories have 2 records, one contains digest for directory header, other +// the recursive digest for directory contents. "/dir/" is the record for +// header, "/dir" is for contents. 
For the root node "" (empty string) is the +// key for root, "/" for the root header + +func Checksum(ctx context.Context, ref cache.ImmutableRef, path string) (digest.Digest, error) { + return getDefaultManager().Checksum(ctx, ref, path) +} + +func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { + return getDefaultManager().GetCacheContext(ctx, md) +} + +func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error { + return getDefaultManager().SetCacheContext(ctx, md, cc) +} + +type CacheContext interface { + Checksum(ctx context.Context, ref cache.Mountable, p string) (digest.Digest, error) + HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error +} + +type Hashed interface { + Digest() digest.Digest +} + +type cacheManager struct { + locker *locker.Locker + lru *simplelru.LRU + lruMu sync.Mutex +} + +func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) { + cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata())) + if err != nil { + return "", nil + } + return cc.Checksum(ctx, ref, p) +} + +func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { + cm.locker.Lock(md.ID()) + cm.lruMu.Lock() + v, ok := cm.lru.Get(md.ID()) + cm.lruMu.Unlock() + if ok { + cm.locker.Unlock(md.ID()) + return v.(*cacheContext), nil + } + cc, err := newCacheContext(md) + if err != nil { + cm.locker.Unlock(md.ID()) + return nil, err + } + cm.lruMu.Lock() + cm.lru.Add(md.ID(), cc) + cm.lruMu.Unlock() + cm.locker.Unlock(md.ID()) + return cc, nil +} + +func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error { + cc, ok := cci.(*cacheContext) + if !ok { + return errors.Errorf("invalid cachecontext: %T", cc) + } + if md.ID() != cc.md.ID() { + cc = &cacheContext{ + md: md, + tree: cci.(*cacheContext).tree, + dirtyMap: map[string]struct{}{}, + } + } else { + if err := cc.save(); err != nil { + return err + } + } + cm.lruMu.Lock() + cm.lru.Add(md.ID(), cc) + cm.lruMu.Unlock() + return nil +} + +type cacheContext struct { + mu sync.RWMutex + md *metadata.StorageItem + tree *iradix.Tree + dirty bool // needs to be persisted to disk + + // used in HandleChange + txn *iradix.Txn + node *iradix.Node + dirtyMap map[string]struct{} +} + +type mount struct { + mountable cache.Mountable + mountPath string + unmount func() error +} + +func (m *mount) mount(ctx context.Context) (string, error) { + if m.mountPath != "" { + return m.mountPath, nil + } + mounts, err := m.mountable.Mount(ctx, true) + if err != nil { + return "", err + } + + lm := snapshot.LocalMounter(mounts) + + mp, err := lm.Mount() + if err != nil { + return "", err + } + + m.mountPath = mp + m.unmount = lm.Unmount + return mp, nil +} + +func (m *mount) clean() error { + if m.mountPath != "" { + if err := m.unmount(); err != nil { + return err + } + m.mountPath = "" + } + return nil +} + +func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) { + cc := &cacheContext{ + md: md, + tree: iradix.New(), + dirtyMap: map[string]struct{}{}, + } + if err := cc.load(); err != nil { + return nil, err + } + return cc, nil +} + +func (cc *cacheContext) load() error { + dt, err := cc.md.GetExternal(keyContentHash) + if err != nil { + return nil + } + + var l CacheRecords + if err := l.Unmarshal(dt); err != nil { + return err + } + + txn := cc.tree.Txn() + for _, p := range l.Paths { + 
txn.Insert([]byte(p.Path), p.Record) + } + cc.tree = txn.Commit() + return nil +} + +func (cc *cacheContext) save() error { + cc.mu.Lock() + defer cc.mu.Unlock() + + if cc.txn != nil { + cc.commitActiveTransaction() + } + + var l CacheRecords + node := cc.tree.Root() + node.Walk(func(k []byte, v interface{}) bool { + l.Paths = append(l.Paths, &CacheRecordWithPath{ + Path: string(k), + Record: v.(*CacheRecord), + }) + return false + }) + + dt, err := l.Marshal() + if err != nil { + return err + } + + return cc.md.SetExternal(keyContentHash, dt) +} + +// HandleChange notifies the source about a modification operation +func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + p = path.Join("/", filepath.ToSlash(p)) + if p == "/" { + p = "" + } + k := convertPathToKey([]byte(p)) + + deleteDir := func(cr *CacheRecord) { + if cr.Type == CacheRecordTypeDir { + cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool { + cc.txn.Delete(k) + return false + }) + } + } + + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.txn == nil { + cc.txn = cc.tree.Txn() + cc.node = cc.tree.Root() + + // root is not called by HandleChange. need to fake it + if _, ok := cc.node.Get([]byte{0}); !ok { + cc.txn.Insert([]byte{0}, &CacheRecord{ + Type: CacheRecordTypeDirHeader, + Digest: digest.FromBytes(nil), + }) + cc.txn.Insert([]byte(""), &CacheRecord{ + Type: CacheRecordTypeDir, + }) + } + } + + if kind == fsutil.ChangeKindDelete { + v, ok := cc.txn.Delete(k) + if ok { + deleteDir(v.(*CacheRecord)) + } + d := path.Dir(p) + if d == "/" { + d = "" + } + cc.dirtyMap[d] = struct{}{} + return + } + + stat, ok := fi.Sys().(*fsutil.Stat) + if !ok { + return errors.Errorf("%s invalid change without stat information", p) + } + + h, ok := fi.(Hashed) + if !ok { + return errors.Errorf("invalid fileinfo: %s", p) + } + + v, ok := cc.node.Get(k) + if ok { + deleteDir(v.(*CacheRecord)) + } + + cr := &CacheRecord{ + Type: CacheRecordTypeFile, + } + if fi.Mode()&os.ModeSymlink != 0 { + cr.Type = CacheRecordTypeSymlink + cr.Linkname = filepath.ToSlash(stat.Linkname) + } + if fi.IsDir() { + cr.Type = CacheRecordTypeDirHeader + cr2 := &CacheRecord{ + Type: CacheRecordTypeDir, + } + cc.txn.Insert(k, cr2) + k = append(k, 0) + p += "/" + } + cr.Digest = h.Digest() + cc.txn.Insert(k, cr) + d := path.Dir(p) + if d == "/" { + d = "" + } + cc.dirtyMap[d] = struct{}{} + + return nil +} + +func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string) (digest.Digest, error) { + m := &mount{mountable: mountable} + defer m.clean() + + const maxSymlinkLimit = 255 + i := 0 + for { + if i > maxSymlinkLimit { + return "", errors.Errorf("too many symlinks: %s", p) + } + cr, err := cc.checksumNoFollow(ctx, m, p) + if err != nil { + return "", err + } + if cr.Type == CacheRecordTypeSymlink { + link := cr.Linkname + if !path.IsAbs(cr.Linkname) { + link = path.Join(path.Dir(p), link) + } + i++ + p = link + } else { + return cr.Digest, nil + } + } +} + +func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) { + p = path.Join("/", filepath.ToSlash(p)) + if p == "/" { + p = "" + } + + cc.mu.RLock() + if cc.txn == nil { + root := cc.tree.Root() + cc.mu.RUnlock() + v, ok := root.Get(convertPathToKey([]byte(p))) + if ok { + cr := v.(*CacheRecord) + if cr.Digest != "" { + return cr, nil + } + } + } else { + cc.mu.RUnlock() + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + if cc.txn != nil { + cc.commitActiveTransaction() + 
} + + defer func() { + if cc.dirty { + go cc.save() + cc.dirty = false + } + }() + + return cc.lazyChecksum(ctx, m, p) +} + +func (cc *cacheContext) commitActiveTransaction() { + for d := range cc.dirtyMap { + addParentToMap(d, cc.dirtyMap) + } + for d := range cc.dirtyMap { + k := convertPathToKey([]byte(d)) + if _, ok := cc.txn.Get(k); ok { + cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir}) + } + } + cc.tree = cc.txn.Commit() + cc.node = nil + cc.dirtyMap = map[string]struct{}{} + cc.txn = nil +} + +func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) { + root := cc.tree.Root() + if cc.needsScan(root, p) { + if err := cc.scanPath(ctx, m, p); err != nil { + return nil, err + } + } + k := convertPathToKey([]byte(p)) + txn := cc.tree.Txn() + root = txn.Root() + cr, updated, err := cc.checksum(ctx, root, txn, m, k) + if err != nil { + return nil, err + } + cc.tree = txn.Commit() + cc.dirty = updated + return cr, err +} + +func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte) (*CacheRecord, bool, error) { + v, ok := root.Get(k) + + if !ok { + return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k)) + } + cr := v.(*CacheRecord) + + if cr.Digest != "" { + return cr, false, nil + } + var dgst digest.Digest + + switch cr.Type { + case CacheRecordTypeDir: + h := sha256.New() + next := append(k, 0) + iter := root.Seek(next) + subk := next + ok := true + for { + if !ok || !bytes.HasPrefix(subk, next) { + break + } + h.Write(bytes.TrimPrefix(subk, k)) + + subcr, _, err := cc.checksum(ctx, root, txn, m, subk) + if err != nil { + return nil, false, err + } + + h.Write([]byte(subcr.Digest)) + + if subcr.Type == CacheRecordTypeDir { // skip subfiles + next := append(subk, 0, 0xff) + iter = root.Seek(next) + } + subk, _, ok = iter.Next() + } + dgst = digest.NewDigest(digest.SHA256, h) + + default: + p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))) + + target, err := m.mount(ctx) + if err != nil { + return nil, false, err + } + + // no FollowSymlinkInScope because invalid paths should not be inserted + fp := filepath.Join(target, filepath.FromSlash(p)) + + fi, err := os.Lstat(fp) + if err != nil { + return nil, false, err + } + + dgst, err = prepareDigest(fp, p, fi) + if err != nil { + return nil, false, err + } + } + + cr2 := &CacheRecord{ + Digest: dgst, + Type: cr.Type, + Linkname: cr.Linkname, + } + + txn.Insert(k, cr2) + + return cr2, true, nil +} + +func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool { + if p == "/" { + p = "" + } + if _, ok := root.Get(convertPathToKey([]byte(p))); !ok { + if p == "" { + return true + } + return cc.needsScan(root, path.Clean(path.Dir(p))) + } + return false +} + +func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) { + p = path.Join("/", p) + d, _ := path.Split(p) + + mp, err := m.mount(ctx) + if err != nil { + return err + } + + parentPath, err := fs.RootPath(mp, filepath.FromSlash(d)) + if err != nil { + return err + } + + n := cc.tree.Root() + txn := cc.tree.Txn() + + err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := filepath.Rel(mp, path) + if err != nil { + return err + } + k := []byte(filepath.Join("/", filepath.ToSlash(rel))) + if string(k) == "/" { + k = []byte{} + } + k = convertPathToKey(k) + if _, ok := n.Get(k); !ok { + cr := 
&CacheRecord{ + Type: CacheRecordTypeFile, + } + if fi.Mode()&os.ModeSymlink != 0 { + cr.Type = CacheRecordTypeSymlink + link, err := os.Readlink(path) + if err != nil { + return err + } + cr.Linkname = filepath.ToSlash(link) + } + if fi.IsDir() { + cr.Type = CacheRecordTypeDirHeader + cr2 := &CacheRecord{ + Type: CacheRecordTypeDir, + } + txn.Insert(k, cr2) + k = append(k, 0) + } + txn.Insert(k, cr) + } + return nil + }) + if err != nil { + return err + } + + cc.tree = txn.Commit() + return nil +} + +func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) { + h, err := NewFileHash(fp, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", p) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + // TODO: would be nice to put the contents to separate hash first + // so it can be cached for hardlinks + f, err := os.Open(fp) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", p) + } + defer f.Close() + if _, err := poolsCopy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", p) + } + } + return digest.NewDigest(digest.SHA256, h), nil +} + +func addParentToMap(d string, m map[string]struct{}) { + if d == "" { + return + } + d = path.Dir(d) + if d == "/" { + d = "" + } + m[d] = struct{}{} + addParentToMap(d, m) +} + +func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem { + v := md.Get("cache.equalMutable") // TODO: const + if v == nil { + return md + } + var mutable string + if err := v.Unmarshal(&mutable); err != nil { + return md + } + si, ok := md.Storage().Get(mutable) + if ok { + return si + } + return md +} + +var pool32K = sync.Pool{ + New: func() interface{} { return make([]byte, 32*1024) }, // 32K +} + +func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := pool32K.Get().([]byte) + written, err = io.CopyBuffer(dst, src, buf) + pool32K.Put(buf) + return +} + +func convertPathToKey(p []byte) []byte { + return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1) +} + +func convertKeyToPath(p []byte) []byte { + return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1) +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go new file mode 100644 index 0000000000..31dcce95ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go @@ -0,0 +1,755 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: checksum.proto + +/* + Package contenthash is a generated protocol buffer package. + + It is generated from these files: + checksum.proto + + It has these top-level messages: + CacheRecord + CacheRecordWithPath + CacheRecords +*/ +package contenthash + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
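+// The hand-rolled Marshal/Unmarshal code below implements the protobuf wire
+// format directly: every field starts with a varint key (the field number
+// shifted left by three bits, ORed with the wire type), followed by a
+// length-prefixed byte string for strings and embedded messages, or a bare
+// varint for enum values.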
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CacheRecordType int32 + +const ( + CacheRecordTypeFile CacheRecordType = 0 + CacheRecordTypeDir CacheRecordType = 1 + CacheRecordTypeDirHeader CacheRecordType = 2 + CacheRecordTypeSymlink CacheRecordType = 3 +) + +var CacheRecordType_name = map[int32]string{ + 0: "FILE", + 1: "DIR", + 2: "DIR_HEADER", + 3: "SYMLINK", +} +var CacheRecordType_value = map[string]int32{ + "FILE": 0, + "DIR": 1, + "DIR_HEADER": 2, + "SYMLINK": 3, +} + +func (x CacheRecordType) String() string { + return proto.EnumName(CacheRecordType_name, int32(x)) +} +func (CacheRecordType) EnumDescriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} } + +type CacheRecord struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"` + Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"` +} + +func (m *CacheRecord) Reset() { *m = CacheRecord{} } +func (m *CacheRecord) String() string { return proto.CompactTextString(m) } +func (*CacheRecord) ProtoMessage() {} +func (*CacheRecord) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} } + +func (m *CacheRecord) GetType() CacheRecordType { + if m != nil { + return m.Type + } + return CacheRecordTypeFile +} + +func (m *CacheRecord) GetLinkname() string { + if m != nil { + return m.Linkname + } + return "" +} + +type CacheRecordWithPath struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Record *CacheRecord `protobuf:"bytes,2,opt,name=record" json:"record,omitempty"` +} + +func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} } +func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) } +func (*CacheRecordWithPath) ProtoMessage() {} +func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{1} } + +func (m *CacheRecordWithPath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *CacheRecordWithPath) GetRecord() *CacheRecord { + if m != nil { + return m.Record + } + return nil +} + +type CacheRecords struct { + Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *CacheRecords) Reset() { *m = CacheRecords{} } +func (m *CacheRecords) String() string { return proto.CompactTextString(m) } +func (*CacheRecords) ProtoMessage() {} +func (*CacheRecords) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{2} } + +func (m *CacheRecords) GetPaths() []*CacheRecordWithPath { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord") + proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath") + proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords") + proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value) +} +func (m *CacheRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i 
= encodeVarintChecksum(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Type != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintChecksum(dAtA, i, uint64(m.Type)) + } + if len(m.Linkname) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname))) + i += copy(dAtA[i:], m.Linkname) + } + return i, nil +} + +func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Record != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintChecksum(dAtA, i, uint64(m.Record.Size())) + n1, err := m.Record.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *CacheRecords) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, msg := range m.Paths { + dAtA[i] = 0xa + i++ + i = encodeVarintChecksum(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CacheRecord) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovChecksum(uint64(m.Type)) + } + l = len(m.Linkname) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + return n +} + +func (m *CacheRecordWithPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovChecksum(uint64(l)) + } + return n +} + +func (m *CacheRecords) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovChecksum(uint64(l)) + } + } + return n +} + +func sovChecksum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozChecksum(x uint64) (n int) { + return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CacheRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (CacheRecordType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Linkname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Record == nil { + m.Record = &CacheRecord{} + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheRecords) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, &CacheRecordWithPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipChecksum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthChecksum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + 
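+ // varint decoding: each byte contributes its low seven bits; a byte
+ // with the high bit clear is the last byte of the value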
if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipChecksum(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("checksum.proto", fileDescriptorChecksum) } + +var fileDescriptorChecksum = []byte{ + // 418 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xd4, 0x40, + 0x18, 0xc7, 0x77, 0xba, 0xeb, 0xaa, 0xdf, 0x4a, 0x0d, 0x53, 0x68, 0xc3, 0x50, 0xb2, 0xe3, 0x5e, + 0x5c, 0x8a, 0xcd, 0x96, 0x08, 0xde, 0xad, 0xd9, 0xa5, 0xd1, 0x2a, 0x32, 0x15, 0x44, 0x3c, 0x48, + 0x36, 0x3b, 0x66, 0x42, 0x9b, 0x4c, 0x48, 0x66, 0x0f, 0xfb, 0x06, 0x92, 0x93, 0x2f, 0x90, 0x93, + 0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xd1, 0xb3, 0x87, 0x22, 0xeb, 0x8b, 0x48, 0x26, 0x55, 0x42, 0xca, + 0x9e, 0xe6, 0xfb, 0x66, 0x7e, 0xdf, 0xff, 0xff, 0x9f, 0x61, 0x60, 0x3b, 0x10, 0x3c, 0x38, 0xcf, + 0x97, 0xb1, 0x9d, 0x66, 0x52, 0x49, 0x3c, 0x08, 0x64, 0xa2, 0x78, 0xa2, 0x84, 0x9f, 0x0b, 0x72, + 0x18, 0x46, 0x4a, 0x2c, 0xe7, 0x76, 0x20, 0xe3, 0x49, 0x28, 0x43, 0x39, 0xd1, 0xcc, 0x7c, 0xf9, + 0x51, 0x77, 0xba, 0xd1, 0x55, 0x3d, 0x3b, 0xfa, 0x86, 0x60, 0xf0, 0xcc, 0x0f, 0x04, 0x67, 0x3c, + 0x90, 0xd9, 0x02, 0x3f, 0x87, 0xfe, 0x22, 0x0a, 0x79, 0xae, 0x4c, 0x44, 0xd1, 0xf8, 0xee, 0xb1, + 0x73, 0x79, 0x35, 0xec, 0xfc, 0xba, 0x1a, 0x1e, 0x34, 0x64, 0x65, 0xca, 0x93, 0xca, 0xd2, 0x8f, + 0x12, 0x9e, 0xe5, 0x93, 0x50, 0x1e, 0xd6, 0x23, 0xb6, 0xab, 0x17, 0x76, 0xad, 0x80, 0x8f, 0xa0, + 0xa7, 0x56, 0x29, 0x37, 0xb7, 0x28, 0x1a, 0x6f, 0x3b, 0xfb, 0x76, 0x23, 0xa6, 0xdd, 0xf0, 0x7c, + 0xb3, 0x4a, 0x39, 0xd3, 0x24, 0x26, 0x70, 0xe7, 0x22, 0x4a, 0xce, 0x13, 0x3f, 0xe6, 0x66, 0xb7, + 0xf2, 0x67, 0xff, 0xfb, 0xd1, 0x7b, 0xd8, 0x69, 0x0c, 0xbd, 0x8d, 0x94, 0x78, 0xed, 0x2b, 0x81, + 0x31, 0xf4, 0x52, 0x5f, 0x89, 0x3a, 0x2e, 0xd3, 0x35, 0x3e, 0x82, 0x7e, 0xa6, 0x29, 0x6d, 0x3d, + 0x70, 0xcc, 0x4d, 0xd6, 0xec, 0x9a, 0x1b, 0xcd, 0xe0, 0x5e, 0x63, 0x3b, 0xc7, 0x4f, 0xe0, 0x56, + 0xa5, 0x94, 0x9b, 0x88, 0x76, 0xc7, 0x03, 0x87, 0x6e, 0x12, 0xf8, 0x17, 0x83, 0xd5, 0xf8, 0xc1, + 0x0f, 0x04, 0xf7, 0x5b, 0x57, 0xc3, 0x0f, 0xa0, 0x37, 0xf3, 0x4e, 0xa7, 0x46, 0x87, 0xec, 0x15, + 0x25, 0xdd, 0x69, 0x1d, 0xcf, 0xa2, 0x0b, 0x8e, 0x87, 0xd0, 0x75, 0x3d, 0x66, 0x20, 0xb2, 0x5b, + 0x94, 0x14, 0xb7, 0x08, 0x37, 0xca, 0xf0, 0x23, 0x00, 0xd7, 0x63, 0x1f, 0x4e, 0xa6, 0x4f, 0xdd, + 0x29, 0x33, 0xb6, 0xc8, 0x7e, 0x51, 0x52, 0xf3, 0x26, 0x77, 0xc2, 0xfd, 0x05, 0xcf, 0xf0, 0x43, + 0xb8, 0x7d, 0xf6, 0xee, 0xe5, 0xa9, 0xf7, 0xea, 0x85, 0xd1, 0x25, 0xa4, 0x28, 0xe9, 0x6e, 0x0b, + 0x3d, 0x5b, 0xc5, 0xd5, 0xbb, 0x92, 0xbd, 0x4f, 0x5f, 0xac, 0xce, 0xf7, 0xaf, 0x56, 0x3b, 0xf3, + 0xb1, 0x71, 0xb9, 0xb6, 0xd0, 0xcf, 0xb5, 0x85, 0x7e, 0xaf, 0x2d, 0xf4, 0xf9, 0x8f, 0xd5, 0x99, + 0xf7, 0xf5, 0x7f, 0x79, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf2, 0x2e, 0x06, 0x7d, 0x02, + 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto 
b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto new file mode 100644 index 0000000000..d6e524ea7c --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package contenthash; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +enum CacheRecordType { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "CacheRecordType"; + + FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"]; + DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"]; + DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"]; + SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"]; +} + +message CacheRecord { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + CacheRecordType type = 2; + string linkname = 3; +} + +message CacheRecordWithPath { + string path = 1; + CacheRecord record = 2; +} + +message CacheRecords { + repeated CacheRecordWithPath paths = 1; +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go new file mode 100644 index 0000000000..46c15cc6df --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go @@ -0,0 +1,98 @@ +package contenthash + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + "path/filepath" + "time" + + "github.com/tonistiigi/fsutil" +) + +// NewFileHash returns a new hash that is used for the builder cache keys +func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + + stat := &fsutil.Stat{ + Mode: uint32(fi.Mode()), + Size_: fi.Size(), + ModTime: fi.ModTime().UnixNano(), + Linkname: link, + } + + if fi.Mode()&os.ModeSymlink != 0 { + stat.Mode = stat.Mode | 0777 + } + + if err := setUnixOpt(path, fi, stat); err != nil { + return nil, err + } + return NewFromStat(stat) +} + +func NewFromStat(stat *fsutil.Stat) (hash.Hash, error) { + fi := &statInfo{stat} + hdr, err := tar.FileInfoHeader(fi, stat.Linkname) + if err != nil { + return nil, err + } + hdr.Name = "" // note: empty name is different from the current hash in docker build. Name is added on recursive directory scan instead + hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode))) + hdr.Devmajor = stat.Devmajor + hdr.Devminor = stat.Devminor + + if len(stat.Xattrs) > 0 { + hdr.Xattrs = make(map[string]string, len(stat.Xattrs)) + for k, v := range stat.Xattrs { + hdr.Xattrs[k] = string(v) + } + } + // fmt.Printf("hdr: %#v\n", hdr) + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state.
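+// Resetting does not leave the digest empty: it immediately re-seeds the
+// underlying hash with the selected tar header fields, so any file content
+// written afterwards is always hashed together with the file's metadata.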
+func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + WriteV1TarsumHeaders(tsh.hdr, tsh.Hash) +} + +type statInfo struct { + *fsutil.Stat +} + +func (s *statInfo) Name() string { + return filepath.Base(s.Stat.Path) +} +func (s *statInfo) Size() int64 { + return s.Stat.Size_ +} +func (s *statInfo) Mode() os.FileMode { + return os.FileMode(s.Stat.Mode) +} +func (s *statInfo) ModTime() time.Time { + return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) +} +func (s *statInfo) IsDir() bool { + return s.Mode().IsDir() +} +func (s *statInfo) Sys() interface{} { + return s.Stat +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go new file mode 100644 index 0000000000..0939112cb7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go @@ -0,0 +1,47 @@ +// +build !windows + +package contenthash + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/tonistiigi/fsutil" + + "golang.org/x/sys/unix" +) + +func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { + return perm +} + +func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error { + s := fi.Sys().(*syscall.Stat_t) + + stat.Uid = s.Uid + stat.Gid = s.Gid + + if !fi.IsDir() { + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + stat.Devmajor = int64(unix.Major(uint64(s.Rdev))) + stat.Devminor = int64(unix.Minor(uint64(s.Rdev))) + } + } + + attrs, err := sysx.LListxattr(path) + if err != nil { + return err + } + if len(attrs) > 0 { + stat.Xattrs = map[string][]byte{} + for _, attr := range attrs { + v, err := sysx.LGetxattr(path, attr) + if err == nil { + stat.Xattrs[attr] = v + } + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go new file mode 100644 index 0000000000..340954aa36 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package contenthash + +import ( + "os" + + "github.com/tonistiigi/fsutil" +) + +// chmodWindowsTarEntry is used to adjust the file permissions used in tar +// header based on the platform the archival is done. +func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/generate.go b/vendor/github.com/moby/buildkit/cache/contenthash/generate.go new file mode 100644 index 0000000000..e4bd2c50c0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/generate.go @@ -0,0 +1,3 @@ +package contenthash + +//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. checksum.proto diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go new file mode 100644 index 0000000000..601c41ecb9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go @@ -0,0 +1,60 @@ +package contenthash + +import ( + "archive/tar" + "io" + "sort" + "strconv" +) + +// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format. 
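+// The selected fields are emitted as bare key/value strings with no
+// separators. A minimal caller-side sketch (hdr and fileReader are assumed
+// here, not part of this package): seed a digest with the metadata, then
+// hash the content:
+//
+//    h := sha256.New()
+//    WriteV1TarsumHeaders(hdr, h)
+//    io.Copy(h, fileReader)
+//    sum := h.Sum(nil)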
+func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// Functions below are from docker legacy tarsum implementation. +// There is no valid technical reason to continue using them. + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. Start from a zero-length slice so no empty + // placeholder keys are appended below. + xAttrKeys := make([]string, 0, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (index 5). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} diff --git a/vendor/github.com/moby/buildkit/cache/fsutil.go b/vendor/github.com/moby/buildkit/cache/fsutil.go new file mode 100644 index 0000000000..ec757d5e88 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/fsutil.go @@ -0,0 +1,71 @@ +package cache + +import ( + "context" + "io" + "io/ioutil" + "os" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/snapshot" +) + +type ReadRequest struct { + Filename string + Range *FileRange +} + +type FileRange struct { + Offset int + Length int +} + +func ReadFile(ctx context.Context, ref ImmutableRef, req ReadRequest) ([]byte, error) { + mount, err := ref.Mount(ctx, true) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + root, err := lm.Mount() + if err != nil { + return nil, err + } + + defer func() { + if lm != nil { + lm.Unmount() + } + }() + + fp, err := fs.RootPath(root, req.Filename) + if err != nil { + return nil, err + } + + var dt []byte + + if req.Range == nil { + dt, err = ioutil.ReadFile(fp) + if err != nil { + return nil, err + } + } else { + f, err := os.Open(fp) + if err != nil { + return nil, err + } + dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) + f.Close() + if err != nil { + return nil, err + } + } + + if err := lm.Unmount(); err != nil { + return nil, err + } + lm = nil + return dt, err +} diff --git a/vendor/github.com/moby/buildkit/cache/gc.go b/vendor/github.com/moby/buildkit/cache/gc.go new file mode 100644 index 0000000000..31a98b93c8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/gc.go @@ -0,0 +1,27 @@ +package cache + +import ( + "context" + "errors" + "time" +) + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// // CachePolicy defines policy for keeping a resource in cache +// type 
CachePolicy struct { +// Priority int +// LastUsed time.Time +// } +// +// func defaultCachePolicy() CachePolicy { +// return CachePolicy{Priority: 10, LastUsed: time.Now()} +// } + +func (cm *cacheManager) GC(ctx context.Context) error { + return errors.New("GC not implemented") +} diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go new file mode 100644 index 0000000000..f0ea2e77a9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -0,0 +1,573 @@ +package cache + +import ( + "context" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/snapshots" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +var ( + errLocked = errors.New("locked") + errNotFound = errors.New("not found") + errInvalid = errors.New("invalid") +) + +type ManagerOpt struct { + Snapshotter snapshot.SnapshotterBase + GCPolicy GCPolicy + MetadataStore *metadata.Store +} + +type Accessor interface { + Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) + GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) + New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) + GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase? +} + +type Controller interface { + DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error) + Prune(ctx context.Context, ch chan client.UsageInfo) error + GC(ctx context.Context) error +} + +type Manager interface { + Accessor + Controller + Close() error +} + +type cacheManager struct { + records map[string]*cacheRecord + mu sync.Mutex + ManagerOpt + md *metadata.Store + + muPrune sync.Mutex // serializes Prune calls so concurrent runs cannot produce inconsistent results +} + +func NewManager(opt ManagerOpt) (Manager, error) { + cm := &cacheManager{ + ManagerOpt: opt, + md: opt.MetadataStore, + records: make(map[string]*cacheRecord), + } + + if err := cm.init(context.TODO()); err != nil { + return nil, err + } + + // cm.scheduleGC(5 * time.Minute) + + return cm, nil +} + +// init loads all snapshots from metadata state and tries to load the records +// from the snapshotter. If a snapshot can't be found, its metadata is deleted as well. +func (cm *cacheManager) init(ctx context.Context) error { + items, err := cm.md.All() + if err != nil { + return err + } + + for _, si := range items { + if _, err := cm.getRecord(ctx, si.ID(), false); err != nil { + logrus.Debugf("could not load snapshot %s: %v", si.ID(), err) + cm.md.Clear(si.ID()) + // TODO: make sure content is deleted as well + } + } + return nil +} + +// Close closes the manager and releases the metadata database lock. No other +// method should be called after Close. +func (cm *cacheManager) Close() error { + // TODO: allocate internal context and cancel it here + return cm.md.Close() +} + +// Get returns an immutable snapshot reference for ID +func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.get(ctx, id, false, opts...)
+} + +// GetFromSnapshotter returns an immutable snapshot reference for ID, falling
+// back to the snapshotter when no metadata record exists yet
+func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.get(ctx, id, true, opts...) +} + +// get requires manager lock to be taken +func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (ImmutableRef, error) { + rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...) + if err != nil { + return nil, err + } + rec.mu.Lock() + defer rec.mu.Unlock() + + if rec.mutable { + if len(rec.refs) != 0 { + return nil, errors.Wrapf(errLocked, "%s is locked", id) + } + if rec.equalImmutable != nil { + return rec.equalImmutable.ref(), nil + } + return rec.mref().commit(ctx) + } + + return rec.ref(), nil +} + +// getRecord returns the record for id. Requires manager lock. +func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (cr *cacheRecord, retErr error) { + if rec, ok := cm.records[id]; ok { + if rec.isDead() { + return nil, errNotFound + } + return rec, nil + } + + md, ok := cm.md.Get(id) + if !ok && !fromSnapshotter { + return nil, errNotFound + } + if mutableID := getEqualMutable(md); mutableID != "" { + mutable, err := cm.getRecord(ctx, mutableID, fromSnapshotter) + if err != nil { + // the backing mutable record was deleted from disk; drop the stale metadata + if errors.Cause(err) == errNotFound { + cm.md.Clear(id) + } + return nil, err + } + rec := &cacheRecord{ + mu: &sync.Mutex{}, + cm: cm, + refs: make(map[Mountable]struct{}), + parent: mutable.Parent(), + md: md, + equalMutable: &mutableRef{cacheRecord: mutable}, + } + mutable.equalImmutable = &immutableRef{cacheRecord: rec} + cm.records[id] = rec + return rec, nil + } + + info, err := cm.Snapshotter.Stat(ctx, id) + if err != nil { + return nil, errors.Wrap(errNotFound, err.Error()) + } + + var parent ImmutableRef + if info.Parent != "" { + parent, err = cm.get(ctx, info.Parent, fromSnapshotter, opts...)
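+ // parents are resolved recursively; the deferred release below drops this
+ // reference again if building the record fails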
+ if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + parent.Release(context.TODO()) + } + }() + } + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: info.Kind != snapshots.KindCommitted, + cm: cm, + refs: make(map[Mountable]struct{}), + parent: parent, + md: md, + } + + // the record was deleted but we crashed before data on disk was removed + if getDeleted(md) { + if err := rec.remove(ctx, true); err != nil { + return nil, err + } + return nil, errNotFound + } + + if err := initializeMetadata(rec, opts...); err != nil { + if parent != nil { + parent.Release(context.TODO()) + } + return nil, err + } + + cm.records[id] = rec + return rec, nil +} + +func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) { + id := identity.NewID() + + var parent ImmutableRef + var parentID string + if s != nil { + var err error + parent, err = cm.Get(ctx, s.ID()) + if err != nil { + return nil, err + } + if err := parent.Finalize(ctx); err != nil { + return nil, err + } + parentID = parent.ID() + } + + if err := cm.Snapshotter.Prepare(ctx, id, parentID); err != nil { + if parent != nil { + parent.Release(context.TODO()) + } + return nil, errors.Wrapf(err, "failed to prepare %s", id) + } + + md, _ := cm.md.Get(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: true, + cm: cm, + refs: make(map[Mountable]struct{}), + parent: parent, + md: md, + } + + if err := initializeMetadata(rec, opts...); err != nil { + if parent != nil { + parent.Release(context.TODO()) + } + return nil, err + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + cm.records[id] = rec // TODO: save to db + + return rec.mref(), nil +} +func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + + rec, err := cm.getRecord(ctx, id, false) + if err != nil { + return nil, err + } + + rec.mu.Lock() + defer rec.mu.Unlock() + if !rec.mutable { + return nil, errors.Wrapf(errInvalid, "%s is not mutable", id) + } + + if len(rec.refs) != 0 { + return nil, errors.Wrapf(errLocked, "%s is locked", id) + } + + if rec.equalImmutable != nil { + if len(rec.equalImmutable.refs) != 0 { + return nil, errors.Wrapf(errLocked, "%s is locked", id) + } + delete(cm.records, rec.equalImmutable.ID()) + if err := rec.equalImmutable.remove(ctx, false); err != nil { + return nil, err + } + rec.equalImmutable = nil + } + + return rec.mref(), nil +} + +func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo) error { + cm.muPrune.Lock() + defer cm.muPrune.Unlock() + return cm.prune(ctx, ch) +} + +func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) error { + var toDelete []*cacheRecord + cm.mu.Lock() + + for _, cr := range cm.records { + cr.mu.Lock() + + // ignore duplicates that share data + if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { + cr.mu.Unlock() + continue + } + + if cr.isDead() { + cr.mu.Unlock() + continue + } + + if len(cr.refs) == 0 { + cr.dead = true + toDelete = append(toDelete, cr) + } + + // mark metadata as deleted in case we crash before cleanup finished + if err := setDeleted(cr.md); err != nil { + cr.mu.Unlock() + cm.mu.Unlock() + return err + } + cr.mu.Unlock() + } + + cm.mu.Unlock() + + if len(toDelete) == 0 { + return nil + } + + var err error + for _, cr := range toDelete { + cr.mu.Lock() + + usageCount, lastUsedAt := getLastUsed(cr.md) + + c := client.UsageInfo{ + ID: cr.ID(), + Mutable: 
cr.mutable, + InUse: len(cr.refs) > 0, + Size: getSize(cr.md), + CreatedAt: GetCreatedAt(cr.md), + Description: GetDescription(cr.md), + LastUsedAt: lastUsedAt, + UsageCount: usageCount, + } + + if cr.parent != nil { + c.Parent = cr.parent.ID() + } + + if c.Size == sizeUnknown { + cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead + s, err := cr.Size(ctx) + if err != nil { + return err + } + c.Size = s + cr.mu.Lock() + } + + if cr.equalImmutable != nil { + if err1 := cr.equalImmutable.remove(ctx, false); err == nil { + err = err1 + } + } + if err1 := cr.remove(ctx, true); err == nil { + err = err1 + } + + if err == nil && ch != nil { + ch <- c + } + cr.mu.Unlock() + } + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + return cm.prune(ctx, ch) + } +} + +func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + cm.mu.Lock() + + type cacheUsageInfo struct { + refs int + parent string + size int64 + mutable bool + createdAt time.Time + usageCount int + lastUsedAt *time.Time + description string + doubleRef bool + } + + m := make(map[string]*cacheUsageInfo, len(cm.records)) + rescan := make(map[string]struct{}, len(cm.records)) + + for id, cr := range cm.records { + cr.mu.Lock() + // ignore duplicates that share data + if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { + cr.mu.Unlock() + continue + } + + usageCount, lastUsedAt := getLastUsed(cr.md) + c := &cacheUsageInfo{ + refs: len(cr.refs), + mutable: cr.mutable, + size: getSize(cr.md), + createdAt: GetCreatedAt(cr.md), + usageCount: usageCount, + lastUsedAt: lastUsedAt, + description: GetDescription(cr.md), + doubleRef: cr.equalImmutable != nil, + } + if cr.parent != nil { + c.parent = cr.parent.ID() + } + if cr.mutable && c.refs > 0 { + c.size = 0 // size can not be determined because it is changing + } + m[id] = c + rescan[id] = struct{}{} + cr.mu.Unlock() + } + cm.mu.Unlock() + + for { + if len(rescan) == 0 { + break + } + for id := range rescan { + v := m[id] + if v.refs == 0 && v.parent != "" { + m[v.parent].refs-- + if v.doubleRef { + m[v.parent].refs-- + } + rescan[v.parent] = struct{}{} + } + delete(rescan, id) + } + } + + var du []*client.UsageInfo + for id, cr := range m { + if opt.Filter != "" && !strings.HasPrefix(id, opt.Filter) { + continue + } + + c := &client.UsageInfo{ + ID: id, + Mutable: cr.mutable, + InUse: cr.refs > 0, + Size: cr.size, + Parent: cr.parent, + CreatedAt: cr.createdAt, + Description: cr.description, + LastUsedAt: cr.lastUsedAt, + UsageCount: cr.usageCount, + } + du = append(du, c) + } + + eg, ctx := errgroup.WithContext(ctx) + + for _, d := range du { + if d.Size == sizeUnknown { + func(d *client.UsageInfo) { + eg.Go(func() error { + ref, err := cm.Get(ctx, d.ID) + if err != nil { + d.Size = 0 + return nil + } + s, err := ref.Size(ctx) + if err != nil { + return err + } + d.Size = s + return ref.Release(context.TODO()) + }) + }(d) + } + } + + if err := eg.Wait(); err != nil { + return du, err + } + + return du, nil +} + +func IsLocked(err error) bool { + return errors.Cause(err) == errLocked +} + +func IsNotFound(err error) bool { + return errors.Cause(err) == errNotFound +} + +type RefOption func(withMetadata) error + +type cachePolicy int + +const ( + cachePolicyDefault cachePolicy = iota + cachePolicyRetain +) + +type withMetadata interface { + Metadata() *metadata.StorageItem +} + +func HasCachePolicyRetain(m 
withMetadata) bool { + return getCachePolicy(m.Metadata()) == cachePolicyRetain +} + +func CachePolicyRetain(m withMetadata) error { + return queueCachePolicy(m.Metadata(), cachePolicyRetain) +} + +func WithDescription(descr string) RefOption { + return func(m withMetadata) error { + return queueDescription(m.Metadata(), descr) + } +} + +func WithCreationTime(tm time.Time) RefOption { + return func(m withMetadata) error { + return queueCreatedAt(m.Metadata(), tm) + } +} + +func initializeMetadata(m withMetadata, opts ...RefOption) error { + md := m.Metadata() + if tm := GetCreatedAt(md); !tm.IsZero() { + return nil + } + + if err := queueCreatedAt(md, time.Now()); err != nil { + return err + } + + for _, opt := range opts { + if err := opt(m); err != nil { + return err + } + } + + return md.Commit() +} diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go new file mode 100644 index 0000000000..787b5a5b9f --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -0,0 +1,206 @@ +package cache + +import ( + "time" + + "github.com/boltdb/bolt" + "github.com/moby/buildkit/cache/metadata" + "github.com/pkg/errors" +) + +const sizeUnknown int64 = -1 +const keySize = "snapshot.size" +const keyEqualMutable = "cache.equalMutable" +const keyCachePolicy = "cache.cachePolicy" +const keyDescription = "cache.description" +const keyCreatedAt = "cache.createdAt" +const keyLastUsedAt = "cache.lastUsedAt" +const keyUsageCount = "cache.usageCount" + +const keyDeleted = "cache.deleted" + +func setDeleted(si *metadata.StorageItem) error { + v, err := metadata.NewValue(true) + if err != nil { + return errors.Wrap(err, "failed to create size value") + } + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyDeleted, v) + }) + return nil +} + +func getDeleted(si *metadata.StorageItem) bool { + v := si.Get(keyDeleted) + if v == nil { + return false + } + var deleted bool + if err := v.Unmarshal(&deleted); err != nil { + return false + } + return deleted +} + +func setSize(si *metadata.StorageItem, s int64) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create size value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keySize, v) + }) + return nil +} + +func getSize(si *metadata.StorageItem) int64 { + v := si.Get(keySize) + if v == nil { + return sizeUnknown + } + var size int64 + if err := v.Unmarshal(&size); err != nil { + return sizeUnknown + } + return size +} + +func getEqualMutable(si *metadata.StorageItem) string { + v := si.Get(keyEqualMutable) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func setEqualMutable(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable) + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyEqualMutable, v) + }) + return nil +} + +func clearEqualMutable(si *metadata.StorageItem) error { + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyEqualMutable, nil) + }) + return nil +} + +func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error { + v, err := metadata.NewValue(p) + if err != nil { + return errors.Wrap(err, "failed to create cachePolicy value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyCachePolicy, v) + }) + return nil +} + +func getCachePolicy(si 
*metadata.StorageItem) cachePolicy { + v := si.Get(keyCachePolicy) + if v == nil { + return cachePolicyDefault + } + var p cachePolicy + if err := v.Unmarshal(&p); err != nil { + return cachePolicyDefault + } + return p +} + +func queueDescription(si *metadata.StorageItem, descr string) error { + v, err := metadata.NewValue(descr) + if err != nil { + return errors.Wrap(err, "failed to create description value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyDescription, v) + }) + return nil +} + +func GetDescription(si *metadata.StorageItem) string { + v := si.Get(keyDescription) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error { + v, err := metadata.NewValue(tm.UnixNano()) + if err != nil { + return errors.Wrap(err, "failed to create createdAt value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyCreatedAt, v) + }) + return nil +} + +func GetCreatedAt(si *metadata.StorageItem) time.Time { + v := si.Get(keyCreatedAt) + if v == nil { + return time.Time{} + } + var tm int64 + if err := v.Unmarshal(&tm); err != nil { + return time.Time{} + } + return time.Unix(tm/1e9, tm%1e9) +} + +func getLastUsed(si *metadata.StorageItem) (int, *time.Time) { + v := si.Get(keyUsageCount) + if v == nil { + return 0, nil + } + var usageCount int + if err := v.Unmarshal(&usageCount); err != nil { + return 0, nil + } + v = si.Get(keyLastUsedAt) + if v == nil { + return usageCount, nil + } + var lastUsedTs int64 + if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 { + return usageCount, nil + } + tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9) + return usageCount, &tm +} + +func updateLastUsed(si *metadata.StorageItem) error { + count, _ := getLastUsed(si) + count++ + + v, err := metadata.NewValue(count) + if err != nil { + return errors.Wrap(err, "failed to create usageCount value") + } + v2, err := metadata.NewValue(time.Now().UnixNano()) + if err != nil { + return errors.Wrap(err, "failed to create lastUsedAt value") + } + return si.Update(func(b *bolt.Bucket) error { + if err := si.SetValue(b, keyUsageCount, v); err != nil { + return err + } + return si.SetValue(b, keyLastUsedAt, v2) + }) +} diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go new file mode 100644 index 0000000000..461ff6cfd7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -0,0 +1,382 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "strings" + "sync" + + "github.com/boltdb/bolt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + mainBucket = "_main" + indexBucket = "_index" + externalBucket = "_external" +) + +var errNotFound = errors.Errorf("not found") + +type Store struct { + db *bolt.DB +} + +func NewStore(dbPath string) (*Store, error) { + db, err := bolt.Open(dbPath, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) + } + return &Store{db: db}, nil +} + +func (s *Store) DB() *bolt.DB { + return s.db +} + +func (s *Store) All() ([]*StorageItem, error) { + var out []*StorageItem + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, _ []byte) error { + b := b.Bucket(key) + if b == nil { + return nil + } + si, err := newStorageItem(string(key), 
b, s) + if err != nil { + return err + } + out = append(out, si) + return nil + }) + }) + return out, err +} + +func (s *Store) Probe(index string) (bool, error) { + var exists bool + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(indexBucket)) + if b == nil { + return nil + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + search := []byte(indexKey(index, "")) + c := b.Cursor() + k, _ := c.Seek(search) + if k != nil && bytes.HasPrefix(k, search) { + exists = true + } + return nil + }) + return exists, err +} + +func (s *Store) Search(index string) ([]*StorageItem, error) { + var out []*StorageItem + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(indexBucket)) + if b == nil { + return nil + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + index = indexKey(index, "") + c := b.Cursor() + k, _ := c.Seek([]byte(index)) + for { + if k != nil && strings.HasPrefix(string(k), index) { + itemID := strings.TrimPrefix(string(k), index) + k, _ = c.Next() + b := main.Bucket([]byte(itemID)) + if b == nil { + logrus.Errorf("index pointing to missing record %s", itemID) + continue + } + si, err := newStorageItem(itemID, b, s) + if err != nil { + return err + } + out = append(out, si) + } else { + break + } + } + return nil + }) + return out, err +} + +func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error { + return s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return errors.WithStack(errNotFound) + } + b = b.Bucket([]byte(id)) + if b == nil { + return errors.WithStack(errNotFound) + } + return fn(b) + }) +} + +func (s *Store) Clear(id string) error { + return s.db.Update(func(tx *bolt.Tx) error { + external := tx.Bucket([]byte(externalBucket)) + if external != nil { + external.DeleteBucket([]byte(id)) + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + b := main.Bucket([]byte(id)) + if b == nil { + return nil + } + si, err := newStorageItem(id, b, s) + if err != nil { + return err + } + if indexes := si.Indexes(); len(indexes) > 0 { + b := tx.Bucket([]byte(indexBucket)) + if b != nil { + for _, index := range indexes { + if err := b.Delete([]byte(indexKey(index, id))); err != nil { + return err + } + } + } + } + return main.DeleteBucket([]byte(id)) + }) +} + +func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error { + return s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(mainBucket)) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + return fn(b) + }) +} + +func (s *Store) Get(id string) (*StorageItem, bool) { + empty := func() *StorageItem { + si, _ := newStorageItem(id, nil, s) + return si + } + tx, err := s.db.Begin(false) + if err != nil { + return empty(), false + } + defer tx.Rollback() + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return empty(), false + } + b = b.Bucket([]byte(id)) + if b == nil { + return empty(), false + } + si, _ := newStorageItem(id, b, s) + return si, true +} + +func (s *Store) Close() error { + return s.db.Close() +} + +type StorageItem struct { + id string + values map[string]*Value + queue []func(*bolt.Bucket) error + storage *Store + mu sync.RWMutex +} + +func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) { + si := &StorageItem{ + id: id, + storage: s, + values: make(map[string]*Value), + } + if b != nil { + if err := 
b.ForEach(func(k, v []byte) error { + var sv Value + if len(v) > 0 { + if err := json.Unmarshal(v, &sv); err != nil { + return err + } + si.values[string(k)] = &sv + } + return nil + }); err != nil { + return si, err + } + } + return si, nil +} + +func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this? + return s.storage +} + +func (s *StorageItem) ID() string { + return s.id +} + +func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error { + return s.storage.View(s.id, fn) +} + +func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error { + return s.storage.Update(s.id, fn) +} + +func (s *StorageItem) Keys() []string { + keys := make([]string, 0, len(s.values)) + for k := range s.values { + keys = append(keys, k) + } + return keys +} + +func (s *StorageItem) Get(k string) *Value { + s.mu.RLock() + v := s.values[k] + s.mu.RUnlock() + return v +} + +func (s *StorageItem) GetExternal(k string) ([]byte, error) { + var dt []byte + err := s.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(externalBucket)) + if b == nil { + return errors.WithStack(errNotFound) + } + b = b.Bucket([]byte(s.id)) + if b == nil { + return errors.WithStack(errNotFound) + } + dt = b.Get([]byte(k)) + if dt == nil { + return errors.WithStack(errNotFound) + } + return nil + }) + if err != nil { + return nil, err + } + return dt, nil +} + +func (s *StorageItem) SetExternal(k string, dt []byte) error { + return s.storage.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(externalBucket)) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(s.id)) + if err != nil { + return err + } + return b.Put([]byte(k), dt) + }) +} + +func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) { + s.mu.Lock() + defer s.mu.Unlock() + s.queue = append(s.queue, fn) +} + +func (s *StorageItem) Commit() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.Update(func(b *bolt.Bucket) error { + for _, fn := range s.queue { + if err := fn(b); err != nil { + return err + } + } + s.queue = s.queue[:0] + return nil + }) +} + +func (s *StorageItem) Indexes() (out []string) { + for _, v := range s.values { + if v.Index != "" { + out = append(out, v.Index) + } + } + return +} + +func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error { + if v == nil { + if err := b.Put([]byte(key), nil); err != nil { + return err + } + delete(s.values, key) + return nil + } + dt, err := json.Marshal(v) + if err != nil { + return err + } + if err := b.Put([]byte(key), dt); err != nil { + return err + } + if v.Index != "" { + b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket)) + if err != nil { + return err + } + if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil { + return err + } + } + s.values[key] = v + return nil +} + +type Value struct { + Value json.RawMessage `json:"value,omitempty"` + Index string `json:"index,omitempty"` +} + +func NewValue(v interface{}) (*Value, error) { + dt, err := json.Marshal(v) + if err != nil { + return nil, err + } + return &Value{Value: json.RawMessage(dt)}, nil +} + +func (v *Value) Unmarshal(target interface{}) error { + err := json.Unmarshal(v.Value, target) + return err +} + +func indexKey(index, target string) string { + return index + "::" + target +} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go new file mode 100644 index 0000000000..060d91ead1 --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/cache/refs.go @@ -0,0 +1,380 @@ +package cache + +import ( + "context" + "sync" + + "github.com/containerd/containerd/mount" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Ref is a reference to cacheable objects. +type Ref interface { + Mountable + ID() string + Release(context.Context) error + Size(ctx context.Context) (int64, error) + Metadata() *metadata.StorageItem +} + +type ImmutableRef interface { + Ref + Parent() ImmutableRef + Finalize(ctx context.Context) error // Make sure reference is flushed to driver + Clone() ImmutableRef +} + +type MutableRef interface { + Ref + Commit(context.Context) (ImmutableRef, error) +} + +type Mountable interface { + Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) +} + +type cacheRecord struct { + cm *cacheManager + mu *sync.Mutex // the mutex is shared by records sharing data + + mutable bool + refs map[Mountable]struct{} + parent ImmutableRef + md *metadata.StorageItem + + // dead means record is marked as deleted + dead bool + + view string + viewMount snapshot.Mountable + + sizeG flightcontrol.Group + + // these are filled if multiple refs point to same data + equalMutable *mutableRef + equalImmutable *immutableRef +} + +// hold ref lock before calling +func (cr *cacheRecord) ref() *immutableRef { + ref := &immutableRef{cacheRecord: cr} + cr.refs[ref] = struct{}{} + return ref +} + +// hold ref lock before calling +func (cr *cacheRecord) mref() *mutableRef { + ref := &mutableRef{cacheRecord: cr} + cr.refs[ref] = struct{}{} + return ref +} + +// hold ref lock before calling +func (cr *cacheRecord) isDead() bool { + return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) +} + +func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { + // this expects that usage() is implemented lazily + s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { + cr.mu.Lock() + s := getSize(cr.md) + if s != sizeUnknown { + cr.mu.Unlock() + return s, nil + } + driverID := cr.ID() + if cr.equalMutable != nil { + driverID = cr.equalMutable.ID() + } + cr.mu.Unlock() + usage, err := cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID) + if err != nil { + cr.mu.Lock() + isDead := cr.isDead() + cr.mu.Unlock() + if isDead { + return int64(0), nil + } + return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID()) + } + cr.mu.Lock() + setSize(cr.md, usage.Size) + if err := cr.md.Commit(); err != nil { + cr.mu.Unlock() + return s, err + } + cr.mu.Unlock() + return usage.Size, nil + }) + return s.(int64), err +} + +func (cr *cacheRecord) Parent() ImmutableRef { + if cr.parent == nil { + return nil + } + p := cr.parent.(*immutableRef) + p.mu.Lock() + defer p.mu.Unlock() + return p.ref() +} + +func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + if cr.mutable { + m, err := cr.cm.Snapshotter.Mounts(ctx, cr.ID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) + } + if readonly { + m = setReadonly(m) + } + return m, nil + } + + if cr.equalMutable != nil && readonly { + m, err := cr.cm.Snapshotter.Mounts(ctx, cr.equalMutable.ID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID()) + 
} + return setReadonly(m), nil + } + + if err := cr.finalize(ctx); err != nil { + return nil, err + } + if cr.viewMount == nil { // TODO: handle this better + cr.view = identity.NewID() + m, err := cr.cm.Snapshotter.View(ctx, cr.view, cr.ID()) + if err != nil { + cr.view = "" + return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) + } + cr.viewMount = m + } + return cr.viewMount, nil +} + +// call when holding the manager lock +func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { + delete(cr.cm.records, cr.ID()) + if cr.parent != nil { + if err := cr.parent.(*immutableRef).release(ctx); err != nil { + return err + } + } + if removeSnapshot { + if err := cr.cm.Snapshotter.Remove(ctx, cr.ID()); err != nil { + return err + } + } + if err := cr.cm.md.Clear(cr.ID()); err != nil { + return err + } + return nil +} + +func (cr *cacheRecord) ID() string { + return cr.md.ID() +} + +type immutableRef struct { + *cacheRecord +} + +type mutableRef struct { + *cacheRecord +} + +func (sr *immutableRef) Clone() ImmutableRef { + sr.mu.Lock() + ref := sr.ref() + sr.mu.Unlock() + return ref +} + +func (sr *immutableRef) Release(ctx context.Context) error { + sr.cm.mu.Lock() + defer sr.cm.mu.Unlock() + + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.release(ctx) +} + +func (sr *immutableRef) release(ctx context.Context) error { + delete(sr.refs, sr) + + if len(sr.refs) == 0 { + updateLastUsed(sr.md) + if sr.viewMount != nil { // TODO: release viewMount earlier if possible + if err := sr.cm.Snapshotter.Remove(ctx, sr.view); err != nil { + return err + } + sr.view = "" + sr.viewMount = nil + } + + if sr.equalMutable != nil { + sr.equalMutable.release(ctx) + } + // go sr.cm.GC() + } + + return nil +} + +func (sr *immutableRef) Finalize(ctx context.Context) error { + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.finalize(ctx) +} + +func (cr *cacheRecord) Metadata() *metadata.StorageItem { + return cr.md +} + +func (cr *cacheRecord) finalize(ctx context.Context) error { + mutable := cr.equalMutable + if mutable == nil { + return nil + } + err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID()) + if err != nil { + return errors.Wrapf(err, "failed to commit %s", mutable.ID()) + } + mutable.dead = true + go func() { + cr.cm.mu.Lock() + defer cr.cm.mu.Unlock() + if err := mutable.remove(context.TODO(), false); err != nil { + logrus.Error(err) + } + }() + cr.equalMutable = nil + clearEqualMutable(cr.md) + return cr.md.Commit() +} + +func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) { + if !sr.mutable || len(sr.refs) == 0 { + return nil, errors.Wrapf(errInvalid, "invalid mutable ref") + } + + id := identity.NewID() + md, _ := sr.cm.md.Get(id) + + rec := &cacheRecord{ + mu: sr.mu, + cm: sr.cm, + parent: sr.Parent(), + equalMutable: sr, + refs: make(map[Mountable]struct{}), + md: md, + } + + if descr := GetDescription(sr.md); descr != "" { + if err := queueDescription(md, descr); err != nil { + return nil, err + } + } + + if err := initializeMetadata(rec); err != nil { + return nil, err + } + + sr.cm.records[id] = rec + + if err := sr.md.Commit(); err != nil { + return nil, err + } + + setSize(md, sizeUnknown) + setEqualMutable(md, sr.ID()) + if err := md.Commit(); err != nil { + return nil, err + } + + ref := rec.ref() + sr.equalImmutable = ref + return ref, nil +} + +func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) { + sr.cm.mu.Lock() + defer sr.cm.mu.Unlock() + + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.commit(ctx) +} + 
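+// NOTE(reviewer): hedged usage sketch, not part of the vendored file. It
+// assumes a cache manager value ("cm") exposing a New accessor that returns
+// a MutableRef; that accessor lives elsewhere in this package and is not
+// shown in this diff.
+//
+//	mref, err := cm.New(ctx, nil)  // hypothetical accessor; writable snapshot
+//	// write through mref.Mount(ctx, false), then freeze it:
+//	iref, err := mref.Commit(ctx)  // ImmutableRef sharing the same record
+//	defer iref.Release(context.TODO())
+//
+// Commit (above) does not copy data: it allocates a new cacheRecord whose
+// equalMutable field points back at this ref, and finalize (also above)
+// later commits the snapshot in the snapshotter once the ref must be stable.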
+func (sr *mutableRef) Release(ctx context.Context) error { + sr.cm.mu.Lock() + defer sr.cm.mu.Unlock() + + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.release(ctx) +} + +func (sr *mutableRef) release(ctx context.Context) error { + delete(sr.refs, sr) + if getCachePolicy(sr.md) != cachePolicyRetain { + if sr.equalImmutable != nil { + if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain { + return nil + } + if err := sr.equalImmutable.remove(ctx, false); err != nil { + return err + } + } + if sr.parent != nil { + if err := sr.parent.(*immutableRef).release(ctx); err != nil { + return err + } + } + return sr.remove(ctx, true) + } else { + updateLastUsed(sr.md) + } + return nil +} + +func setReadonly(mounts snapshot.Mountable) snapshot.Mountable { + return &readOnlyMounter{mounts} +} + +type readOnlyMounter struct { + snapshot.Mountable +} + +func (m *readOnlyMounter) Mount() ([]mount.Mount, error) { + mounts, err := m.Mountable.Mount() + if err != nil { + return nil, err + } + for i, m := range mounts { + opts := make([]string, 0, len(m.Options)) + for _, opt := range m.Options { + if opt != "rw" { + opts = append(opts, opt) + } + } + opts = append(opts, "ro") + mounts[i].Options = opts + } + return mounts, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go new file mode 100644 index 0000000000..40a36759d8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -0,0 +1,141 @@ +package remotecache + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/docker/distribution/manifest" + v1 "github.com/moby/buildkit/cache/remotecache/v1" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/push" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ExporterOpt struct { + SessionManager *session.Manager +} + +func NewCacheExporter(opt ExporterOpt) *CacheExporter { + return &CacheExporter{opt: opt} +} + +type CacheExporter struct { + opt ExporterOpt +} + +func (ce *CacheExporter) ExporterForTarget(target string) *RegistryCacheExporter { + cc := v1.NewCacheChains() + return &RegistryCacheExporter{target: target, CacheExporterTarget: cc, chains: cc, exporter: ce} +} + +func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, target string) error { + config, descs, err := cc.Marshal() + if err != nil { + return err + } + + // own type because oci type can't be pushed and docker type doesn't have annotations + type manifestList struct { + manifest.Versioned + + // Manifests references platform specific manifests. 
+ Manifests []ocispec.Descriptor `json:"manifests"` + } + + var mfst manifestList + mfst.SchemaVersion = 2 + mfst.MediaType = images.MediaTypeDockerSchema2ManifestList + + allBlobs := map[digest.Digest]struct{}{} + mp := contentutil.NewMultiProvider(nil) + for _, l := range config.Layers { + if _, ok := allBlobs[l.Blob]; ok { + continue + } + dgstPair, ok := descs[l.Blob] + if !ok { + return errors.Errorf("missing blob %s", l.Blob) + } + allBlobs[l.Blob] = struct{}{} + mp.Add(l.Blob, dgstPair.Provider) + + mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor) + } + + dt, err := json.Marshal(config) + if err != nil { + return err + } + dgst := digest.FromBytes(dt) + desc := ocispec.Descriptor{ + Digest: dgst, + Size: int64(len(dt)), + MediaType: v1.CacheConfigMediaTypeV0, + } + configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst)) + buf := contentutil.NewBuffer() + if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil { + return configDone(errors.Wrap(err, "error writing config blob")) + } + configDone(nil) + + mp.Add(dgst, buf) + mfst.Manifests = append(mfst.Manifests, desc) + + dt, err = json.Marshal(mfst) + if err != nil { + return errors.Wrap(err, "failed to marshal manifest") + } + dgst = digest.FromBytes(dt) + + buf = contentutil.NewBuffer() + desc = ocispec.Descriptor{ + Digest: dgst, + Size: int64(len(dt)), + } + mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst)) + if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil { + return mfstDone(errors.Wrap(err, "error writing manifest blob")) + } + mfstDone(nil) + mp.Add(dgst, buf) + + return push.Push(ctx, ce.opt.SessionManager, mp, dgst, target, false) +} + +type RegistryCacheExporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains + target string + exporter *CacheExporter +} + +func (ce *RegistryCacheExporter) Finalize(ctx context.Context) error { + return ce.exporter.Finalize(ctx, ce.chains, ce.target) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go new file mode 100644 index 0000000000..fa17091b10 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go @@ -0,0 +1,124 @@ +package remotecache + +import ( + "context" + "encoding/json" + "net/http" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + v1 "github.com/moby/buildkit/cache/remotecache/v1" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/auth" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/worker" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ImportOpt struct { + SessionManager *session.Manager + Worker worker.Worker // TODO: remove. This sets the worker where the cache is imported to. Should be passed on load instead. 
+} + +func NewCacheImporter(opt ImportOpt) *CacheImporter { + return &CacheImporter{opt: opt} +} + +type CacheImporter struct { + opt ImportOpt +} + +func (ci *CacheImporter) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) { + id := session.FromContext(ctx) + if id == "" { + return nil + } + + return func(host string) (string, string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + caller, err := ci.opt.SessionManager.Get(timeoutCtx, id) + if err != nil { + return "", "", err + } + + return auth.CredentialsFunc(context.TODO(), caller)(host) + } +} + +func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheManager, error) { + resolver := docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + Credentials: ci.getCredentialsFromSession(ctx), + }) + + ref, desc, err := resolver.Resolve(ctx, ref) + if err != nil { + return nil, err + } + + fetcher, err := resolver.Fetcher(ctx, ref) + if err != nil { + return nil, err + } + + b := contentutil.NewBuffer() + + if _, err := remotes.FetchHandler(b, fetcher)(ctx, desc); err != nil { + return nil, err + } + + dt, err := content.ReadBlob(ctx, b, desc) + if err != nil { + return nil, err + } + + var mfst ocispec.Index + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + + allLayers := v1.DescriptorProvider{} + + var configDesc ocispec.Descriptor + + for _, m := range mfst.Manifests { + if m.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = m + continue + } + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: contentutil.FromFetcher(fetcher, m), + } + } + + if configDesc.Digest == "" { + return nil, errors.Errorf("invalid build cache from %s", ref) + } + + if _, err := remotes.FetchHandler(b, fetcher)(ctx, configDesc); err != nil { + return nil, err + } + + dt, err = content.ReadBlob(ctx, b, configDesc) + if err != nil { + return nil, err + } + + cc := v1.NewCacheChains() + if err := v1.Parse(dt, allLayers, cc); err != nil { + return nil, err + } + + keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, ci.opt.Worker) + if err != nil { + return nil, err + } + return solver.NewCacheManager(ref, keysStorage, resultStorage), nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go new file mode 100644 index 0000000000..27b19587c8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -0,0 +1,247 @@ +package cacheimport + +import ( + "context" + + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) { + storage := &cacheKeyStorage{ + byID: map[string]*itemWithOutgoingLinks{}, + byItem: map[*item]string{}, + byResult: map[string]map[string]struct{}{}, + } + + for _, it := range cc.items { + if _, err := addItemToStorage(storage, it); err != nil { + return nil, nil, err + } + } + + results := &cacheResultStorage{ + w: w, + byID: storage.byID, + byResult: storage.byResult, + } + + return storage, results, nil +} + +func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) { + if id, ok := k.byItem[it]; ok { + if id == "" { + return nil, 
errors.Errorf("invalid loop") + } + return k.byID[id], nil + } + + var id string + if len(it.links) == 0 { + id = it.dgst.String() + } else { + id = identity.NewID() + } + + k.byItem[it] = "" + + for i, m := range it.links { + for l := range m { + src, err := addItemToStorage(k, l.src) + if err != nil { + return nil, err + } + cl := nlink{ + input: i, + dgst: it.dgst, + selector: l.selector, + } + src.links[cl] = append(src.links[cl], id) + } + } + + k.byItem[it] = id + + itl := &itemWithOutgoingLinks{ + item: it, + links: map[nlink][]string{}, + } + + k.byID[id] = itl + + if res := it.result; res != nil { + resultID := remoteID(res) + ids, ok := k.byResult[resultID] + if !ok { + ids = map[string]struct{}{} + k.byResult[resultID] = ids + } + ids[id] = struct{}{} + } + return itl, nil +} + +type cacheKeyStorage struct { + byID map[string]*itemWithOutgoingLinks + byItem map[*item]string + byResult map[string]map[string]struct{} +} + +type itemWithOutgoingLinks struct { + *item + links map[nlink][]string +} + +func (cs *cacheKeyStorage) Exists(id string) bool { + _, ok := cs.byID[id] + return ok +} + +func (cs *cacheKeyStorage) Walk(func(id string) error) error { + return nil +} + +func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error { + it, ok := cs.byID[id] + if !ok { + return nil + } + if res := it.result; res != nil { + return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}) + } + return nil +} + +func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) { + it, ok := cs.byID[id] + if !ok { + return solver.CacheResult{}, nil + } + if res := it.result; res != nil { + return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil + } + return solver.CacheResult{}, nil +} + +func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error { + return nil +} + +func (cs *cacheKeyStorage) Release(resultID string) error { + return nil +} +func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error { + return nil +} +func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { + it, ok := cs.byID[id] + if !ok { + return nil + } + for _, id := range it.links[nlink{ + dgst: outputKey(link.Digest, int(link.Output)), + input: int(link.Input), + selector: link.Selector.String(), + }] { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +// TODO: +func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { + return nil +} + +func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error { + ids := cs.byResult[id] + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool { + l := nlink{ + dgst: outputKey(link.Digest, int(link.Output)), + input: int(link.Input), + selector: link.Selector.String(), + } + if it, ok := cs.byID[id]; ok { + for _, id := range it.links[l] { + if id == target { + return true + } + } + } + return false +} + +type cacheResultStorage struct { + w worker.Worker + byID map[string]*itemWithOutgoingLinks + byResult map[string]map[string]struct{} +} + +func (cs *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) { + return solver.CacheResult{}, errors.Errorf("importer is immutable") +} + +func (cs *cacheResultStorage) Load(ctx context.Context, res 
solver.CacheResult) (solver.Result, error) { + remote, err := cs.LoadRemote(ctx, res) + if err != nil { + return nil, err + } + + ref, err := cs.w.FromRemote(ctx, remote) + if err != nil { + return nil, err + } + return worker.NewWorkerRefResult(ref, cs.w), nil +} + +func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { + if r := cs.byResultID(res.ID); r != nil { + return r, nil + } + return nil, errors.WithStack(solver.ErrNotFound) +} + +func (cs *cacheResultStorage) Exists(id string) bool { + return cs.byResultID(id) != nil +} + +func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote { + m, ok := cs.byResult[resultID] + if !ok || len(m) == 0 { + return nil + } + + for id := range m { + it, ok := cs.byID[id] + if ok { + if r := it.result; r != nil { + return r + } + } + } + + return nil +} + +// unique ID per remote. this ID is not stable. +func remoteID(r *solver.Remote) string { + dgstr := digest.Canonical.Digester() + for _, desc := range r.Descriptors { + dgstr.Hash().Write([]byte(desc.Digest)) + } + return dgstr.Digest().String() +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go new file mode 100644 index 0000000000..52806b9c44 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -0,0 +1,127 @@ +package cacheimport + +import ( + "time" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func NewCacheChains() *CacheChains { + return &CacheChains{visited: map[interface{}]struct{}{}} +} + +type CacheChains struct { + items []*item + visited map[interface{}]struct{} +} + +func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord { + it := &item{c: c, dgst: dgst} + c.items = append(c.items, it) + return it +} + +func (c *CacheChains) Visit(v interface{}) { + c.visited[v] = struct{}{} +} + +func (c *CacheChains) Visited(v interface{}) bool { + _, ok := c.visited[v] + return ok +} + +func (c *CacheChains) normalize() error { + st := &normalizeState{ + added: map[*item]*item{}, + links: map[*item]map[nlink]map[digest.Digest]struct{}{}, + byKey: map[digest.Digest]*item{}, + } + + for _, it := range c.items { + _, err := normalizeItem(it, st) + if err != nil { + return err + } + } + + items := make([]*item, 0, len(st.byKey)) + for _, it := range st.byKey { + items = append(items, it) + } + c.items = items + return nil +} + +func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { + if err := c.normalize(); err != nil { + return nil, nil, err + } + + st := &marshalState{ + chainsByID: map[string]int{}, + descriptors: DescriptorProvider{}, + recordsByItem: map[*item]int{}, + } + + for _, it := range c.items { + if err := marshalItem(it, st); err != nil { + return nil, nil, err + } + } + + cc := CacheConfig{ + Layers: st.layers, + Records: st.records, + } + sortConfig(&cc) + + return &cc, st.descriptors, nil +} + +type DescriptorProvider map[digest.Digest]DescriptorProviderPair + +type DescriptorProviderPair struct { + Descriptor ocispec.Descriptor + Provider content.Provider +} + +type item struct { + c *CacheChains + dgst digest.Digest + + result *solver.Remote + resultTime time.Time + + links []map[link]struct{} +} + +type link struct { + src *item + selector string +} + +func (c *item) AddResult(createdAt time.Time, 
result *solver.Remote) { + c.resultTime = createdAt + c.result = result +} + +func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { + src, ok := rec.(*item) + if !ok { + return + } + + for { + if index < len(c.links) { + break + } + c.links = append(c.links, map[link]struct{}{}) + } + + c.links[index][link{src: src, selector: selector}] = struct{}{} +} + +var _ solver.CacheExporterTarget = &CacheChains{} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go new file mode 100644 index 0000000000..4cff811490 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go @@ -0,0 +1,50 @@ +package cacheimport + +// Distributable build cache +// +// Main manifest is OCI image index +// https://github.com/opencontainers/image-spec/blob/master/image-index.md . +// Manifests array contains descriptors to the cache layers and one instance of +// build cache config with media type application/vnd.buildkit.cacheconfig.v0 . +// The cache layer descriptors need to have an annotation with uncompressed digest +// to allow deduplication on extraction and optionally "buildkit/createdat" +// annotation to support maintaining original timestamps. +// +// Cache config file layout: +// +//{ +// "layers": [ +// { +// "blob": "sha256:deadbeef", <- digest of layer blob in index +// "parent": -1 <- index of parent layer, -1 if no parent +// }, +// { +// "blob": "sha256:deadbeef", +// "parent": 0 +// } +// ], +// +// "records": [ +// { +// "digest": "sha256:deadbeef", <- base digest for the record +// }, +// { +// "digest": "sha256:deadbeef", +// "output": 1, <- optional output index +// "layers": [ <- optional array of layer chains +// { +// "createdat": "", +// "layer": 1, <- index to the layer +// } +// ], +// "inputs": [ <- dependent records +// [ <- index of the dependency (0) +// { +// "selector": "sel", <- optional selector +// "link": 0, <- index to the dependent record +// } +// ] +// ] +// } +// ] +// } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go new file mode 100644 index 0000000000..8aa6929ea0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go @@ -0,0 +1,102 @@ +package cacheimport + +import ( + "encoding/json" + + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/contentutil" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error { + var config CacheConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return err + } + + cache := map[int]solver.CacheExporterRecord{} + + for i := range config.Records { + if _, err := parseRecord(config, i, provider, t, cache); err != nil { + return err + } + } + + return nil +} + +func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) { + if r, ok := cache[idx]; ok { + if r == nil { + return nil, errors.Errorf("invalid looping record") + } + return r, nil + } + + if idx < 0 || idx >= len(cc.Records) { + return nil, errors.Errorf("invalid record ID: %d", idx) + } + rec := cc.Records[idx] + + r := t.Add(rec.Digest) + cache[idx] = nil + for i, inputs := range rec.Inputs { + for _, inp := range inputs { + src, err := parseRecord(cc,
inp.LinkIndex, provider, t, cache) + if err != nil { + return nil, err + } + r.LinkFrom(src, i, inp.Selector) + } + } + + for _, res := range rec.Results { + visited := map[int]struct{}{} + remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited) + if err != nil { + return nil, err + } + r.AddResult(res.CreatedAt, remote) + } + + cache[idx] = r + return r, nil +} + +func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) { + if _, ok := visited[idx]; ok { + return nil, errors.Errorf("invalid looping layer") + } + visited[idx] = struct{}{} + + if idx < 0 || idx >= len(layers) { + return nil, errors.Errorf("invalid layer index %d", idx) + } + + l := layers[idx] + + descPair, ok := provider[l.Blob] + if !ok { + return nil, errors.Errorf("missing blob for %s", l.Blob) + } + + var r *solver.Remote + if l.ParentIndex != -1 { + var err error + r, err = getRemoteChain(layers, l.ParentIndex, provider, visited) + if err != nil { + return nil, err + } + r.Descriptors = append(r.Descriptors, descPair.Descriptor) + mp := contentutil.NewMultiProvider(r.Provider) + mp.Add(descPair.Descriptor.Digest, descPair.Provider) + r.Provider = mp + return r, nil + } + return &solver.Remote{ + Descriptors: []ocispec.Descriptor{descPair.Descriptor}, + Provider: descPair.Provider, + }, nil + +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go new file mode 100644 index 0000000000..4c6bc0bb26 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go @@ -0,0 +1,35 @@ +package cacheimport + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0" + +type CacheConfig struct { + Layers []CacheLayer `json:"layers,omitempty"` + Records []CacheRecord `json:"records,omitempty"` +} + +type CacheLayer struct { + Blob digest.Digest `json:"blob,omitempty"` + ParentIndex int `json:"parent,omitempty"` +} + +type CacheRecord struct { + Results []CacheResult `json:"layers,omitempty"` + Digest digest.Digest `json:"digest,omitempty"` + Inputs [][]CacheInput `json:"inputs,omitempty"` +} + +type CacheResult struct { + LayerIndex int `json:"layer"` + CreatedAt time.Time `json:"createdAt,omitempty"` +} + +type CacheInput struct { + Selector string `json:"selector,omitempty"` + LinkIndex int `json:"link"` +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go new file mode 100644 index 0000000000..665eb330ca --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -0,0 +1,306 @@ +package cacheimport + +import ( + "fmt" + "sort" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// sortConfig sorts the config structure to make sure it is deterministic +func sortConfig(cc *CacheConfig) { + type indexedLayer struct { + oldIndex int + newIndex int + l CacheLayer + } + + unsortedLayers := make([]*indexedLayer, len(cc.Layers)) + sortedLayers := make([]*indexedLayer, len(cc.Layers)) + + for i, l := range cc.Layers { + il := &indexedLayer{oldIndex: i, l: l} + unsortedLayers[i] = il + sortedLayers[i] = il + } + sort.Slice(sortedLayers, func(i, j int) bool { + li := 
sortedLayers[i].l + lj := sortedLayers[j].l + if li.Blob == lj.Blob { + return li.ParentIndex < lj.ParentIndex + } + return li.Blob < lj.Blob + }) + for i, l := range sortedLayers { + l.newIndex = i + } + + layers := make([]CacheLayer, len(sortedLayers)) + for i, l := range sortedLayers { + if pID := l.l.ParentIndex; pID != -1 { + l.l.ParentIndex = unsortedLayers[pID].newIndex + } + layers[i] = l.l + } + + type indexedRecord struct { + oldIndex int + newIndex int + r CacheRecord + } + + unsortedRecords := make([]*indexedRecord, len(cc.Records)) + sortedRecords := make([]*indexedRecord, len(cc.Records)) + + for i, r := range cc.Records { + ir := &indexedRecord{oldIndex: i, r: r} + unsortedRecords[i] = ir + sortedRecords[i] = ir + } + sort.Slice(sortedRecords, func(i, j int) bool { + ri := sortedRecords[i].r + rj := sortedRecords[j].r + if ri.Digest != rj.Digest { + return ri.Digest < rj.Digest + } + if len(ri.Inputs) != len(rj.Inputs) { + return len(ri.Inputs) < len(rj.Inputs) + } + for i, inputs := range ri.Inputs { + if len(ri.Inputs[i]) != len(rj.Inputs[i]) { + return len(ri.Inputs[i]) < len(rj.Inputs[i]) + } + for j := range inputs { + if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector { + return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector + } + return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest + } + } + return ri.Digest < rj.Digest + }) + for i, l := range sortedRecords { + l.newIndex = i + } + + records := make([]CacheRecord, len(sortedRecords)) + for i, r := range sortedRecords { + for j := range r.r.Results { + r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex + } + for j, inputs := range r.r.Inputs { + for k := range inputs { + r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex + } + sort.Slice(inputs, func(i, j int) bool { + return inputs[i].LinkIndex < inputs[j].LinkIndex + }) + } + records[i] = r.r + } + + cc.Layers = layers + cc.Records = records +} + +func outputKey(dgst digest.Digest, idx int) digest.Digest { + return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx))) +} + +type nlink struct { + dgst digest.Digest + input int + selector string +} +type normalizeState struct { + added map[*item]*item + links map[*item]map[nlink]map[digest.Digest]struct{} + byKey map[digest.Digest]*item + next int +} + +func normalizeItem(it *item, state *normalizeState) (*item, error) { + if it2, ok := state.added[it]; ok { + return it2, nil + } + + if len(it.links) == 0 { + id := it.dgst + if it2, ok := state.byKey[id]; ok { + state.added[it] = it2 + return it2, nil + } + state.byKey[id] = it + state.added[it] = it + return it, nil + } + + matches := map[digest.Digest]struct{}{} + + // check if there is already a matching record + for i, m := range it.links { + if len(m) == 0 { + return nil, errors.Errorf("invalid incomplete links") + } + for l := range m { + nl := nlink{dgst: it.dgst, input: i, selector: l.selector} + it2, err := normalizeItem(l.src, state) + if err != nil { + return nil, err + } + links := state.links[it2][nl] + if i == 0 { + for id := range links { + matches[id] = struct{}{} + } + } else { + for id := range matches { + if _, ok := links[id]; !ok { + delete(matches, id) + } + } + } + } + } + + var id digest.Digest + + links := it.links + + if len(matches) > 0 { + for m := range matches { + if id == "" || id > m { + id = m + } + } + } else { + // keep tmp IDs deterministic + state.next++ + id = digest.FromBytes([]byte(fmt.Sprintf("%d",
state.next))) + state.byKey[id] = it + it.links = make([]map[link]struct{}, len(it.links)) + for i := range it.links { + it.links[i] = map[link]struct{}{} + } + } + + it2 := state.byKey[id] + state.added[it] = it2 + + for i, m := range links { + for l := range m { + subIt, err := normalizeItem(l.src, state) + if err != nil { + return nil, err + } + it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{} + + nl := nlink{dgst: it.dgst, input: i, selector: l.selector} + if _, ok := state.links[subIt]; !ok { + state.links[subIt] = map[nlink]map[digest.Digest]struct{}{} + } + if _, ok := state.links[subIt][nl]; !ok { + state.links[subIt][nl] = map[digest.Digest]struct{}{} + } + state.links[subIt][nl][id] = struct{}{} + } + } + + return it2, nil +} + +type marshalState struct { + layers []CacheLayer + chainsByID map[string]int + descriptors DescriptorProvider + + records []CacheRecord + recordsByItem map[*item]int +} + +func marshalRemote(r *solver.Remote, state *marshalState) string { + if len(r.Descriptors) == 0 { + return "" + } + type Remote struct { + Descriptors []ocispec.Descriptor + Provider content.Provider + } + var parentID string + if len(r.Descriptors) > 1 { + r2 := &solver.Remote{ + Descriptors: r.Descriptors[:len(r.Descriptors)-1], + Provider: r.Provider, + } + parentID = marshalRemote(r2, state) + } + desc := r.Descriptors[len(r.Descriptors)-1] + + state.descriptors[desc.Digest] = DescriptorProviderPair{ + Descriptor: desc, + Provider: r.Provider, + } + + id := desc.Digest.String() + parentID + + if _, ok := state.chainsByID[id]; ok { + return id + } + + state.chainsByID[id] = len(state.layers) + l := CacheLayer{ + Blob: desc.Digest, + ParentIndex: -1, + } + if parentID != "" { + l.ParentIndex = state.chainsByID[parentID] + } + state.layers = append(state.layers, l) + return id +} + +func marshalItem(it *item, state *marshalState) error { + if _, ok := state.recordsByItem[it]; ok { + return nil + } + + rec := CacheRecord{ + Digest: it.dgst, + Inputs: make([][]CacheInput, len(it.links)), + } + + for i, m := range it.links { + for l := range m { + if err := marshalItem(l.src, state); err != nil { + return err + } + idx, ok := state.recordsByItem[l.src] + if !ok { + return errors.Errorf("invalid source record: %v", l.src) + } + rec.Inputs[i] = append(rec.Inputs[i], CacheInput{ + Selector: l.selector, + LinkIndex: idx, + }) + } + } + + if it.result != nil { + id := marshalRemote(it.result, state) + if id != "" { + idx, ok := state.chainsByID[id] + if !ok { + return errors.Errorf("parent chainid not found") + } + rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime}) + } + } + + state.recordsByItem[it] = len(state.records) + state.records = append(state.records, rec) + return nil +} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go new file mode 100644 index 0000000000..c71c00c0e2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -0,0 +1,136 @@ +package client + +import ( + "context" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "time" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/util/appdefaults" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type Client struct { + conn *grpc.ClientConn +} + +type ClientOpt interface{} + +// New returns a new buildkit 
client. Address can be empty for the system-default address. +func New(address string, opts ...ClientOpt) (*Client, error) { + gopts := []grpc.DialOption{ + grpc.WithDialer(dialer), + grpc.FailOnNonTempDialError(true), + } + needWithInsecure := true + for _, o := range opts { + if _, ok := o.(*withBlockOpt); ok { + gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true)) + } + if credInfo, ok := o.(*withCredentials); ok { + opt, err := loadCredentials(credInfo) + if err != nil { + return nil, err + } + gopts = append(gopts, opt) + needWithInsecure = false + } + if wt, ok := o.(*withTracer); ok { + gopts = append(gopts, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())), + grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))) + } + } + if needWithInsecure { + gopts = append(gopts, grpc.WithInsecure()) + } + if address == "" { + address = appdefaults.Address + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + conn, err := grpc.DialContext(ctx, address, gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q; make sure buildkitd is running", address) + } + c := &Client{ + conn: conn, + } + return c, nil +} + +func (c *Client) controlClient() controlapi.ControlClient { + return controlapi.NewControlClient(c.conn) +} + +func (c *Client) Close() error { + return c.conn.Close() +} + +type withBlockOpt struct{} + +func WithBlock() ClientOpt { + return &withBlockOpt{} +} + +type withCredentials struct { + ServerName string + CACert string + Cert string + Key string +} + +// WithCredentials configures the TLS parameters of the client. +// Arguments: +// * serverName: specifies the name of the target server +// * ca: specifies the filepath of the CA certificate to use for verification +// * cert: specifies the filepath of the client certificate +// * key: specifies the filepath of the client key +func WithCredentials(serverName, ca, cert, key string) ClientOpt { + return &withCredentials{serverName, ca, cert, key} +} + +func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { + ca, err := ioutil.ReadFile(opts.CACert) + if err != nil { + return nil, errors.Wrap(err, "could not read ca certificate") + } + + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(ca); !ok { + return nil, errors.New("failed to append ca certs") + } + + cfg := &tls.Config{ + ServerName: opts.ServerName, + RootCAs: certPool, + } + + // error out if the user specified only one of the cert/key pair; both are required together + if opts.Cert != "" || opts.Key != "" { + cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) + if err != nil { + return nil, errors.Wrap(err, "could not read certificate/key") + } + cfg.Certificates = []tls.Certificate{cert} + cfg.BuildNameToCertificate() + } + + return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil +} + +func WithTracer(t opentracing.Tracer) ClientOpt { + return &withTracer{t} +} + +type withTracer struct { + tracer opentracing.Tracer +} diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go new file mode 100644 index 0000000000..93afb956f1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package client + +import ( + "net" + "strings" + "time" + + "github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn,
error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + return net.DialTimeout(addrParts[0], addrParts[1], timeout) +} diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go new file mode 100644 index 0000000000..75905f520b --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_windows.go @@ -0,0 +1,24 @@ +package client + +import ( + "net" + "strings" + "time" + + "github.com/Microsoft/go-winio" + "github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + switch addrParts[0] { + case "npipe": + address = strings.Replace(addrParts[1], "/", "\\", -1) + return winio.DialPipe(address, &timeout) + default: + return net.DialTimeout(addrParts[0], addrParts[1], timeout) + } +} diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go new file mode 100644 index 0000000000..5ed5043223 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/diskusage.go @@ -0,0 +1,73 @@ +package client + +import ( + "context" + "sort" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +type UsageInfo struct { + ID string + Mutable bool + InUse bool + Size int64 + + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int + Parent string + Description string +} + +func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { + info := &DiskUsageInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.DiskUsageRequest{Filter: info.Filter} + resp, err := c.controlClient().DiskUsage(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to call diskusage") + } + + var du []*UsageInfo + + for _, d := range resp.Record { + du = append(du, &UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + }) + } + + sort.Slice(du, func(i, j int) bool { + if du[i].Size == du[j].Size { + return du[i].ID > du[j].ID + } + return du[i].Size > du[j].Size + }) + + return du, nil +} + +type DiskUsageOption func(*DiskUsageInfo) + +type DiskUsageInfo struct { + Filter string +} + +func WithFilter(f string) DiskUsageOption { + return func(di *DiskUsageInfo) { + di.Filter = f + } +} diff --git a/vendor/github.com/moby/buildkit/client/exporters.go b/vendor/github.com/moby/buildkit/client/exporters.go new file mode 100644 index 0000000000..4160d92a73 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/exporters.go @@ -0,0 +1,8 @@ +package client + +const ( + ExporterImage = "image" + ExporterLocal = "local" + ExporterOCI = "oci" + ExporterDocker = "docker" +) diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go new file mode 100644 index 0000000000..141a393cf9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/graph.go @@ -0,0 +1,45 @@ +package client + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +type Vertex struct { + Digest digest.Digest + Inputs []digest.Digest + Name string + Started *time.Time + Completed *time.Time + Cached bool + Error string +} +
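+// NOTE(reviewer): hedged sketch of how these status types are typically
+// consumed; the solve call that feeds the channel is not part of this diff.
+// During a build the daemon streams SolveStatus values (declared below)
+// carrying new vertexes, per-vertex progress and log chunks:
+//
+//	ch := make(chan *SolveStatus)
+//	go func() {
+//		for st := range ch { // drain updates until the solve finishes
+//			for _, v := range st.Vertexes {
+//				fmt.Printf("%s cached=%v\n", v.Name, v.Cached)
+//			}
+//		}
+//	}()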
+type VertexStatus struct { + ID string + Vertex digest.Digest + Name string + Total int64 + Current int64 + Timestamp time.Time + Started *time.Time + Completed *time.Time +} + +type VertexLog struct { + Vertex digest.Digest + Stream int + Data []byte + Timestamp time.Time +} + +type SolveStatus struct { + Vertexes []*Vertex + Statuses []*VertexStatus + Logs []*VertexLog +} + +type SolveResponse struct { + ExporterResponse map[string]string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go new file mode 100644 index 0000000000..98be2653e4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -0,0 +1,387 @@ +package llb + +import ( + _ "crypto/sha256" + "sort" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type Meta struct { + Args []string + Env EnvList + Cwd string + User string + ProxyEnv *ProxyEnv +} + +func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp { + e := &ExecOp{meta: meta, cachedOpMetadata: md} + rootMount := &mount{ + target: pb.RootMount, + source: root, + readonly: readOnly, + } + e.mounts = append(e.mounts, rootMount) + if readOnly { + e.root = root + } else { + e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)} + } + rootMount.output = e.root + + return e +} + +type mount struct { + target string + readonly bool + source Output + output Output + selector string + cacheID string + tmpfs bool + // hasOutput bool +} + +type ExecOp struct { + root Output + mounts []*mount + meta Meta + cachedPBDigest digest.Digest + cachedPB []byte + cachedOpMetadata OpMetadata + isValidated bool +} + +func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { + m := &mount{ + target: target, + source: source, + } + for _, o := range opt { + o(m) + } + e.mounts = append(e.mounts, m) + if m.readonly { + m.output = source + } else if m.tmpfs { + m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)} + } else { + m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)} + } + e.cachedPB = nil + e.isValidated = false + return m.output +} + +func (e *ExecOp) GetMount(target string) Output { + for _, m := range e.mounts { + if m.target == target { + return m.output + } + } + return nil +} + +func (e *ExecOp) Validate() error { + if e.isValidated { + return nil + } + if len(e.meta.Args) == 0 { + return errors.Errorf("arguments are required") + } + if e.meta.Cwd == "" { + return errors.Errorf("working directory is required") + } + for _, m := range e.mounts { + if m.source != nil { + if err := m.source.Vertex().Validate(); err != nil { + return err + } + } + } + e.isValidated = true + return nil +} + +func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) { + if e.cachedPB != nil { + return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil + } + if err := e.Validate(); err != nil { + return "", nil, nil, err + } + // make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + peo := &pb.ExecOp{ + Meta: &pb.Meta{ + Args: e.meta.Args, + Env: e.meta.Env.ToArray(), + Cwd: e.meta.Cwd, + User: e.meta.User, + }, + } + + if p := e.meta.ProxyEnv; p != nil { + peo.Meta.ProxyEnv = &pb.ProxyEnv{ + HttpProxy: p.HttpProxy, + HttpsProxy: p.HttpsProxy, + FtpProxy: p.FtpProxy, + NoProxy: p.NoProxy, + } + } + + pop := &pb.Op{ + Op: &pb.Op_Exec{ + 
Exec: peo, + }, + } + + outIndex := 0 + for _, m := range e.mounts { + inputIndex := pb.InputIndex(len(pop.Inputs)) + if m.source != nil { + if m.tmpfs { + return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch") + } + inp, err := m.source.ToInput() + if err != nil { + return "", nil, nil, err + } + + newInput := true + + for i, inp2 := range pop.Inputs { + if *inp == *inp2 { + inputIndex = pb.InputIndex(i) + newInput = false + break + } + } + + if newInput { + pop.Inputs = append(pop.Inputs, inp) + } + } else { + inputIndex = pb.Empty + } + + outputIndex := pb.OutputIndex(-1) + if !m.readonly && m.cacheID == "" && !m.tmpfs { + outputIndex = pb.OutputIndex(outIndex) + outIndex++ + } + + pm := &pb.Mount{ + Input: inputIndex, + Dest: m.target, + Readonly: m.readonly, + Output: outputIndex, + Selector: m.selector, + } + if m.cacheID != "" { + pm.MountType = pb.MountType_CACHE + pm.CacheOpt = &pb.CacheOpt{ + ID: m.cacheID, + } + } + if m.tmpfs { + pm.MountType = pb.MountType_TMPFS + } + peo.Mounts = append(peo.Mounts, pm) + } + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, err + } + e.cachedPBDigest = digest.FromBytes(dt) + e.cachedPB = dt + return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil +} + +func (e *ExecOp) Output() Output { + return e.root +} + +func (e *ExecOp) Inputs() (inputs []Output) { + mm := map[Output]struct{}{} + for _, m := range e.mounts { + if m.source != nil { + mm[m.source] = struct{}{} + } + } + for o := range mm { + inputs = append(inputs, o) + } + return +} + +func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { + return func() (pb.OutputIndex, error) { + // make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + i := 0 + for _, m2 := range e.mounts { + if m2.readonly || m2.cacheID != "" { + continue + } + if m == m2 { + return pb.OutputIndex(i), nil + } + i++ + } + return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target) + } +} + +type ExecState struct { + State + exec *ExecOp +} + +func (e ExecState) AddMount(target string, source State, opt ...MountOption) State { + return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...)) +} + +func (e ExecState) GetMount(target string) State { + return NewState(e.exec.GetMount(target)) +} + +func (e ExecState) Root() State { + return e.State +} + +type MountOption func(*mount) + +func Readonly(m *mount) { + m.readonly = true +} + +func SourcePath(src string) MountOption { + return func(m *mount) { + m.selector = src + } +} + +func AsPersistentCacheDir(id string) MountOption { + return func(m *mount) { + m.cacheID = id + } +} + +func Tmpfs() MountOption { + return func(m *mount) { + m.tmpfs = true + } +} + +type RunOption interface { + SetRunOption(es *ExecInfo) +} + +type runOptionFunc func(*ExecInfo) + +func (fn runOptionFunc) SetRunOption(ei *ExecInfo) { + fn(ei) +} + +func Shlex(str string) RunOption { + return Shlexf(str) +} +func Shlexf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = shlexf(str, v...)(ei.State) + }) +} + +func Args(a []string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = args(a...)(ei.State) + }) +} + +func AddEnv(key, value string) RunOption { + return AddEnvf(key, value) +} + +func AddEnvf(key, value string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddEnvf(key, value, v...) 
+ }) +} + +func User(str string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.User(str) + }) +} + +func Dir(str string) RunOption { + return Dirf(str) +} +func Dirf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Dirf(str, v...) + }) +} + +func Reset(s State) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Reset(s) + }) +} + +func With(so ...StateOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.With(so...) + }) +} + +func AddMount(dest string, mountState State, opts ...MountOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts}) + }) +} + +func ReadonlyRootFS() RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ReadonlyRootFS = true + }) +} + +func WithProxy(ps ProxyEnv) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ProxyEnv = &ps + }) +} + +type ExecInfo struct { + opMetaWrapper + State State + Mounts []MountInfo + ReadonlyRootFS bool + ProxyEnv *ProxyEnv +} + +type MountInfo struct { + Target string + Source Output + Opts []MountOption +} + +type ProxyEnv struct { + HttpProxy string + HttpsProxy string + FtpProxy string + NoProxy string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go new file mode 100644 index 0000000000..d7d0d7460f --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -0,0 +1,87 @@ +package imagemetaresolver + +import ( + "context" + "net/http" + "sync" + + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/imageutil" + digest "github.com/opencontainers/go-digest" +) + +var defaultImageMetaResolver llb.ImageMetaResolver +var defaultImageMetaResolverOnce sync.Once + +var WithDefault = llb.ImageOptionFunc(func(ii *llb.ImageInfo) { + llb.WithMetaResolver(Default()).SetImageOption(ii) +}) + +type imageMetaResolverOpts struct { + platform string +} + +type ImageMetaResolverOpt func(o *imageMetaResolverOpts) + +func WithPlatform(p string) ImageMetaResolverOpt { + return func(o *imageMetaResolverOpts) { + o.platform = p + } +} + +func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { + var opts imageMetaResolverOpts + for _, f := range with { + f(&opts) + } + return &imageMetaResolver{ + resolver: docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + }), + platform: opts.platform, + buffer: contentutil.NewBuffer(), + cache: map[string]resolveResult{}, + locker: locker.New(), + } +} + +func Default() llb.ImageMetaResolver { + defaultImageMetaResolverOnce.Do(func() { + defaultImageMetaResolver = New() + }) + return defaultImageMetaResolver +} + +type imageMetaResolver struct { + resolver remotes.Resolver + buffer contentutil.Buffer + platform string + locker *locker.Locker + cache map[string]resolveResult +} + +type resolveResult struct { + config []byte + dgst digest.Digest +} + +func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) { + imr.locker.Lock(ref) + defer imr.locker.Unlock(ref) + + if res, ok := imr.cache[ref]; ok { + return res.dgst, res.config, nil + } + + dgst, 
config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, imr.platform) + if err != nil { + return "", nil, err + } + + imr.cache[ref] = resolveResult{dgst: dgst, config: config} + return dgst, config, nil +} diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go new file mode 100644 index 0000000000..4d8ad5557a --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -0,0 +1,60 @@ +package llb + +import ( + "io" + "io/ioutil" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +// Definition is the LLB definition structure with per-vertex metadata entries +// Corresponds to the Definition structure defined in solver/pb.Definition. +type Definition struct { + Def [][]byte + Metadata map[digest.Digest]OpMetadata +} + +func (def *Definition) ToPB() *pb.Definition { + md := make(map[digest.Digest]OpMetadata) + for k, v := range def.Metadata { + md[k] = v + } + return &pb.Definition{ + Def: def.Def, + Metadata: md, + } +} + +func (def *Definition) FromPB(x *pb.Definition) { + def.Def = x.Def + def.Metadata = make(map[digest.Digest]OpMetadata) + for k, v := range x.Metadata { + def.Metadata[k] = v + } +} + +type OpMetadata = pb.OpMetadata + +func WriteTo(def *Definition, w io.Writer) error { + b, err := def.ToPB().Marshal() + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func ReadFrom(r io.Reader) (*Definition, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var pbDef pb.Definition + if err := pbDef.Unmarshal(b); err != nil { + return nil, err + } + var def Definition + def.FromPB(&pbDef) + return &def, nil +} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go new file mode 100644 index 0000000000..54449ff606 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -0,0 +1,152 @@ +package llb + +import ( + "fmt" + "path" + + "github.com/google/shlex" +) + +type contextKeyT string + +var ( + keyArgs = contextKeyT("llb.exec.args") + keyDir = contextKeyT("llb.exec.dir") + keyEnv = contextKeyT("llb.exec.env") + keyUser = contextKeyT("llb.exec.user") +) + +func addEnv(key, value string) StateOption { + return addEnvf(key, value) +} + +func addEnvf(key, value string, v ...interface{}) StateOption { + return func(s State) State { + return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...))) + } +} + +func dir(str string) StateOption { + return dirf(str) +} + +func dirf(str string, v ...interface{}) StateOption { + return func(s State) State { + value := fmt.Sprintf(str, v...) 
+ if !path.IsAbs(value) { + prev := getDir(s) + if prev == "" { + prev = "/" + } + value = path.Join(prev, value) + } + return s.WithValue(keyDir, value) + } +} + +func user(str string) StateOption { + return func(s State) State { + return s.WithValue(keyUser, str) + } +} + +func reset(s_ State) StateOption { + return func(s State) State { + s = NewState(s.Output()) + s.ctx = s_.ctx + return s + } +} + +func getEnv(s State) EnvList { + v := s.Value(keyEnv) + if v != nil { + return v.(EnvList) + } + return EnvList{} +} + +func getDir(s State) string { + v := s.Value(keyDir) + if v != nil { + return v.(string) + } + return "" +} + +func getArgs(s State) []string { + v := s.Value(keyArgs) + if v != nil { + return v.([]string) + } + return nil +} + +func getUser(s State) string { + v := s.Value(keyUser) + if v != nil { + return v.(string) + } + return "" +} + +func args(args ...string) StateOption { + return func(s State) State { + return s.WithValue(keyArgs, args) + } +} + +func shlexf(str string, v ...interface{}) StateOption { + return func(s State) State { + arg, err := shlex.Split(fmt.Sprintf(str, v...)) + if err != nil { + // TODO: handle error + } + return args(arg...)(s) + } +} + +type EnvList []KeyValue + +type KeyValue struct { + key string + value string +} + +func (e EnvList) AddOrReplace(k, v string) EnvList { + e = e.Delete(k) + e = append(e, KeyValue{key: k, value: v}) + return e +} + +func (e EnvList) Delete(k string) EnvList { + e = append([]KeyValue(nil), e...) + if i, ok := e.Index(k); ok { + return append(e[:i], e[i+1:]...) + } + return e +} + +func (e EnvList) Get(k string) (string, bool) { + if index, ok := e.Index(k); ok { + return e[index].value, true + } + return "", false +} + +func (e EnvList) Index(k string) (int, bool) { + for i, kv := range e { + if kv.key == k { + return i, true + } + } + return -1, false +} + +func (e EnvList) ToArray() []string { + out := make([]string, 0, len(e)) + for _, kv := range e { + out = append(out, kv.key+"="+kv.value) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go new file mode 100644 index 0000000000..bac738c967 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -0,0 +1,17 @@ +package llb + +import ( + "context" + + digest "github.com/opencontainers/go-digest" +) + +func WithMetaResolver(mr ImageMetaResolver) ImageOption { + return ImageOptionFunc(func(ii *ImageInfo) { + ii.metaResolver = mr + }) +} + +type ImageMetaResolver interface { + ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go new file mode 100644 index 0000000000..5b1bf061bf --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -0,0 +1,359 @@ +package llb + +import ( + "context" + _ "crypto/sha256" + "encoding/json" + "os" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type SourceOp struct { + id string + attrs map[string]string + output Output + cachedPBDigest digest.Digest + cachedPB []byte + cachedOpMetadata OpMetadata + err error +} + +func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp { + s := &SourceOp{ + id: id, + attrs: attrs, + cachedOpMetadata: md, + } + s.output = &output{vertex: s} + return s +} + 
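+// Illustrative note: the helpers below (Image, Git, Local, HTTP) are thin
+// wrappers that build the identifier and attrs for a SourceOp; for example
+// Image("alpine") produces the identifier
+// "docker-image://docker.io/library/alpine:latest".
+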
+func (s *SourceOp) Validate() error { + if s.err != nil { + return s.err + } + if s.id == "" { + return errors.Errorf("source identifier can't be empty") + } + return nil +} + +func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) { + if s.cachedPB != nil { + return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil + } + if err := s.Validate(); err != nil { + return "", nil, nil, err + } + + proto := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs}, + }, + } + dt, err := proto.Marshal() + if err != nil { + return "", nil, nil, err + } + s.cachedPB = dt + s.cachedPBDigest = digest.FromBytes(dt) + return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil +} + +func (s *SourceOp) Output() Output { + return s.output +} + +func (s *SourceOp) Inputs() []Output { + return nil +} + +func Source(id string) State { + return NewState(NewSource(id, nil, OpMetadata{}).Output()) +} + +func Image(ref string, opts ...ImageOption) State { + r, err := reference.ParseNormalizedNamed(ref) + if err == nil { + ref = reference.TagNameOnly(r).String() + } + var info ImageInfo + for _, opt := range opts { + opt.SetImageOption(&info) + } + src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial + if err != nil { + src.err = err + } + if info.metaResolver != nil { + _, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref) + if err != nil { + src.err = err + } else { + var img struct { + Config struct { + Env []string `json:"Env,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + User string `json:"User,omitempty"` + } `json:"config,omitempty"` + } + if err := json.Unmarshal(dt, &img); err != nil { + src.err = err + } else { + st := NewState(src.Output()) + for _, env := range img.Config.Env { + parts := strings.SplitN(env, "=", 2) + if len(parts[0]) > 0 { + var v string + if len(parts) > 1 { + v = parts[1] + } + st = st.AddEnv(parts[0], v) + } + } + st = st.Dir(img.Config.WorkingDir) + return st + } + } + } + return NewState(src.Output()) +} + +type ImageOption interface { + SetImageOption(*ImageInfo) +} + +type ImageOptionFunc func(*ImageInfo) + +func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) { + fn(ii) +} + +type ImageInfo struct { + opMetaWrapper + metaResolver ImageMetaResolver +} + +func Git(remote, ref string, opts ...GitOption) State { + url := "" + + for _, prefix := range []string{ + "http://", "https://", "git://", "git@", + } { + if strings.HasPrefix(remote, prefix) { + url = strings.Split(remote, "#")[0] + remote = strings.TrimPrefix(remote, prefix) + } + } + + id := remote + + if ref != "" { + id += "#" + ref + } + + gi := &GitInfo{} + for _, o := range opts { + o.SetGitOption(gi) + } + attrs := map[string]string{} + if gi.KeepGitDir { + attrs[pb.AttrKeepGitDir] = "true" + } + if url != "" { + attrs[pb.AttrFullRemoteURL] = url + } + source := NewSource("git://"+id, attrs, gi.Metadata()) + return NewState(source.Output()) +} + +type GitOption interface { + SetGitOption(*GitInfo) +} +type gitOptionFunc func(*GitInfo) + +func (fn gitOptionFunc) SetGitOption(gi *GitInfo) { + fn(gi) +} + +type GitInfo struct { + opMetaWrapper + KeepGitDir bool +} + +func KeepGitDir() GitOption { + return gitOptionFunc(func(gi *GitInfo) { + gi.KeepGitDir = true + }) +} + +func Scratch() State { + return NewState(nil) +} + +func Local(name string, opts ...LocalOption) State { + gi := &LocalInfo{} + + for _, o := range opts { + o.SetLocalOption(gi) + } + attrs := map[string]string{} + if gi.SessionID != "" { + 
attrs[pb.AttrLocalSessionID] = gi.SessionID
+	}
+	if gi.IncludePatterns != "" {
+		attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
+	}
+	if gi.FollowPaths != "" {
+		attrs[pb.AttrFollowPaths] = gi.FollowPaths
+	}
+	if gi.ExcludePatterns != "" {
+		attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
+	}
+	if gi.SharedKeyHint != "" {
+		attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
+	}
+
+	source := NewSource("local://"+name, attrs, gi.Metadata())
+	return NewState(source.Output())
+}
+
+type LocalOption interface {
+	SetLocalOption(*LocalInfo)
+}
+
+type localOptionFunc func(*LocalInfo)
+
+func (fn localOptionFunc) SetLocalOption(li *LocalInfo) {
+	fn(li)
+}
+
+func SessionID(id string) LocalOption {
+	return localOptionFunc(func(li *LocalInfo) {
+		li.SessionID = id
+	})
+}
+
+func IncludePatterns(p []string) LocalOption {
+	return localOptionFunc(func(li *LocalInfo) {
+		if len(p) == 0 {
+			li.IncludePatterns = ""
+			return
+		}
+		dt, _ := json.Marshal(p) // empty on error
+		li.IncludePatterns = string(dt)
+	})
+}
+
+func FollowPaths(p []string) LocalOption {
+	return localOptionFunc(func(li *LocalInfo) {
+		if len(p) == 0 {
+			li.FollowPaths = ""
+			return
+		}
+		dt, _ := json.Marshal(p) // empty on error
+		li.FollowPaths = string(dt)
+	})
+}
+
+func ExcludePatterns(p []string) LocalOption {
+	return localOptionFunc(func(li *LocalInfo) {
+		if len(p) == 0 {
+			li.ExcludePatterns = ""
+			return
+		}
+		dt, _ := json.Marshal(p) // empty on error
+		li.ExcludePatterns = string(dt)
+	})
+}
+
+func SharedKeyHint(h string) LocalOption {
+	return localOptionFunc(func(li *LocalInfo) {
+		li.SharedKeyHint = h
+	})
+}
+
+type LocalInfo struct {
+	opMetaWrapper
+	SessionID       string
+	IncludePatterns string
+	ExcludePatterns string
+	FollowPaths     string
+	SharedKeyHint   string
+}
+
+func HTTP(url string, opts ...HTTPOption) State {
+	hi := &HTTPInfo{}
+	for _, o := range opts {
+		o.SetHTTPOption(hi)
+	}
+	attrs := map[string]string{}
+	if hi.Checksum != "" {
+		attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
+	}
+	if hi.Filename != "" {
+		attrs[pb.AttrHTTPFilename] = hi.Filename
+	}
+	if hi.Perm != 0 {
+		attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
+	}
+	if hi.UID != 0 {
+		attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
+	}
+	if hi.GID != 0 {
+		attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
+	}
+
+	source := NewSource(url, attrs, hi.Metadata())
+	return NewState(source.Output())
+}
+
+type HTTPInfo struct {
+	opMetaWrapper
+	Checksum digest.Digest
+	Filename string
+	Perm     int
+	UID      int
+	GID      int
+}
+
+type HTTPOption interface {
+	SetHTTPOption(*HTTPInfo)
+}
+
+type httpOptionFunc func(*HTTPInfo)
+
+func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) {
+	fn(hi)
+}
+
+func Checksum(dgst digest.Digest) HTTPOption {
+	return httpOptionFunc(func(hi *HTTPInfo) {
+		hi.Checksum = dgst
+	})
+}
+
+func Chmod(perm os.FileMode) HTTPOption {
+	return httpOptionFunc(func(hi *HTTPInfo) {
+		hi.Perm = int(perm) & 0777
+	})
+}
+
+func Filename(name string) HTTPOption {
+	return httpOptionFunc(func(hi *HTTPInfo) {
+		hi.Filename = name
+	})
+}
+
+func Chown(uid, gid int) HTTPOption {
+	return httpOptionFunc(func(hi *HTTPInfo) {
+		hi.UID = uid
+		hi.GID = gid
+	})
+}
diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go
new file mode 100644
index 0000000000..a53b212ebb
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/client/llb/state.go
@@ -0,0 +1,316 @@
+package llb
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/solver/pb"
+
"github.com/moby/buildkit/util/system" + digest "github.com/opencontainers/go-digest" +) + +type StateOption func(State) State + +type Output interface { + ToInput() (*pb.Input, error) + Vertex() Vertex +} + +type Vertex interface { + Validate() error + Marshal() (digest.Digest, []byte, *OpMetadata, error) + Output() Output + Inputs() []Output +} + +func NewState(o Output) State { + s := State{ + out: o, + ctx: context.Background(), + } + s = dir("/")(s) + s = addEnv("PATH", system.DefaultPathEnv)(s) + return s +} + +type State struct { + out Output + ctx context.Context +} + +func (s State) WithValue(k, v interface{}) State { + return State{ + out: s.out, + ctx: context.WithValue(s.ctx, k, v), + } +} + +func (s State) Value(k interface{}) interface{} { + return s.ctx.Value(k) +} + +func (s State) Marshal(md ...MetadataOpt) (*Definition, error) { + def := &Definition{ + Metadata: make(map[digest.Digest]OpMetadata, 0), + } + if s.Output() == nil { + return def, nil + } + def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md) + if err != nil { + return def, err + } + inp, err := s.Output().ToInput() + if err != nil { + return def, err + } + proto := &pb.Op{Inputs: []*pb.Input{inp}} + dt, err := proto.Marshal() + if err != nil { + return def, err + } + def.Def = append(def.Def, dt) + return def, nil +} + +func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) { + if _, ok := vertexCache[v]; ok { + return def, nil + } + for _, inp := range v.Inputs() { + var err error + def, err = marshal(inp.Vertex(), def, cache, vertexCache, md) + if err != nil { + return def, err + } + } + + dgst, dt, opMeta, err := v.Marshal() + if err != nil { + return def, err + } + vertexCache[v] = struct{}{} + if opMeta != nil { + m := mergeMetadata(def.Metadata[dgst], *opMeta) + for _, f := range md { + f.SetMetadataOption(&m) + } + def.Metadata[dgst] = m + } + if _, ok := cache[dgst]; ok { + return def, nil + } + def.Def = append(def.Def, dt) + cache[dgst] = struct{}{} + return def, nil +} + +func (s State) Validate() error { + return s.Output().Vertex().Validate() +} + +func (s State) Output() Output { + return s.out +} + +func (s State) WithOutput(o Output) State { + return State{ + out: o, + ctx: s.ctx, + } +} + +func (s State) Run(ro ...RunOption) ExecState { + ei := &ExecInfo{State: s} + for _, o := range ro { + o.SetRunOption(ei) + } + meta := Meta{ + Args: getArgs(ei.State), + Cwd: getDir(ei.State), + Env: getEnv(ei.State), + User: getUser(ei.State), + ProxyEnv: ei.ProxyEnv, + } + + exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata()) + for _, m := range ei.Mounts { + exec.AddMount(m.Target, m.Source, m.Opts...) 
+ } + + return ExecState{ + State: s.WithOutput(exec.Output()), + exec: exec, + } +} + +func (s State) AddEnv(key, value string) State { + return s.AddEnvf(key, value) +} + +func (s State) AddEnvf(key, value string, v ...interface{}) State { + return addEnvf(key, value, v...)(s) +} + +func (s State) Dir(str string) State { + return s.Dirf(str) +} +func (s State) Dirf(str string, v ...interface{}) State { + return dirf(str, v...)(s) +} + +func (s State) GetEnv(key string) (string, bool) { + return getEnv(s).Get(key) +} + +func (s State) GetDir() string { + return getDir(s) +} + +func (s State) GetArgs() []string { + return getArgs(s) +} + +func (s State) Reset(s2 State) State { + return reset(s2)(s) +} + +func (s State) User(v string) State { + return user(v)(s) +} + +func (s State) With(so ...StateOption) State { + for _, o := range so { + s = o(s) + } + return s +} + +type output struct { + vertex Vertex + getIndex func() (pb.OutputIndex, error) + err error +} + +func (o *output) ToInput() (*pb.Input, error) { + if o.err != nil { + return nil, o.err + } + var index pb.OutputIndex + if o.getIndex != nil { + var err error + index, err = o.getIndex() + if err != nil { + return nil, err + } + } + dgst, _, _, err := o.vertex.Marshal() + if err != nil { + return nil, err + } + return &pb.Input{Digest: dgst, Index: index}, nil +} + +func (o *output) Vertex() Vertex { + return o.vertex +} + +type MetadataOpt interface { + SetMetadataOption(*OpMetadata) + RunOption + LocalOption + HTTPOption + ImageOption + GitOption +} + +type metadataOptFunc func(m *OpMetadata) + +func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) { + fn(m) +} + +func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) { + ei.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) { + li.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) { + hi.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) { + ii.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetGitOption(gi *GitInfo) { + gi.ApplyMetadata(fn) +} + +func mergeMetadata(m1, m2 OpMetadata) OpMetadata { + if m2.IgnoreCache { + m1.IgnoreCache = true + } + if len(m2.Description) > 0 { + if m1.Description == nil { + m1.Description = make(map[string]string) + } + for k, v := range m2.Description { + m1.Description[k] = v + } + } + if m2.ExportCache != nil { + m1.ExportCache = m2.ExportCache + } + + return m1 +} + +var IgnoreCache = metadataOptFunc(func(md *OpMetadata) { + md.IgnoreCache = true +}) + +func WithDescription(m map[string]string) MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + md.Description = m + }) +} + +// WithExportCache forces results for this vertex to be exported with the cache +func WithExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + md.ExportCache = &pb.ExportCache{Value: true} + }) +} + +// WithoutExportCache sets results for this vertex to be not exported with +// the cache +func WithoutExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + // ExportCache with value false means to disable exporting + md.ExportCache = &pb.ExportCache{Value: false} + }) +} + +// WithoutDefaultExportCache resets the cache export for the vertex to use +// the default defined by the build configuration. 
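+// Together with WithExportCache and WithoutExportCache this covers the three
+// possible states of OpMetadata.ExportCache: forced on, forced off, and nil
+// (fall back to the build's default).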
+func WithoutDefaultExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + // nil means no vertex based config has been set + md.ExportCache = nil + }) +} + +type opMetaWrapper struct { + OpMetadata +} + +func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) { + f(&mw.OpMetadata) +} + +func (mw *opMetaWrapper) Metadata() OpMetadata { + return mw.OpMetadata +} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go new file mode 100644 index 0000000000..b3c1edcd2b --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/prune.go @@ -0,0 +1,50 @@ +package client + +import ( + "context" + "io" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error { + info := &PruneInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.PruneRequest{} + cl, err := c.controlClient().Prune(ctx, req) + if err != nil { + return errors.Wrap(err, "failed to call prune") + } + + for { + d, err := cl.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if ch != nil { + ch <- UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + } + } + } +} + +type PruneOption func(*PruneInfo) + +type PruneInfo struct { +} diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go new file mode 100644 index 0000000000..972b6b3ec9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -0,0 +1,251 @@ +package client + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver/pb" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type SolveOpt struct { + Exporter string + ExporterAttrs map[string]string + ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker + ExporterOutputDir string // for ExporterLocal + LocalDirs map[string]string + SharedKey string + Frontend string + FrontendAttrs map[string]string + ExportCache string + ExportCacheAttrs map[string]string + ImportCache []string + Session []session.Attachable +} + +// Solve calls Solve on the controller. +// def must be nil if (and only if) opt.Frontend is set. 
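+//
+// A minimal illustrative call, assuming a connected Client c and a
+// hypothetical io.WriteCloser out:
+//
+//	def, _ := llb.Image("docker.io/library/alpine:latest").Marshal()
+//	res, err := c.Solve(ctx, def, SolveOpt{
+//		Exporter:       ExporterOCI,
+//		ExporterOutput: out,
+//	}, nil)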
+func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { + defer func() { + if statusChan != nil { + close(statusChan) + } + }() + + if opt.Frontend == "" && def == nil { + return nil, errors.New("invalid empty definition") + } + if opt.Frontend != "" && def != nil { + return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend) + } + + syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) + if err != nil { + return nil, err + } + + ref := identity.NewID() + eg, ctx := errgroup.WithContext(ctx) + + statusContext, cancelStatus := context.WithCancel(context.Background()) + defer cancelStatus() + + if span := opentracing.SpanFromContext(ctx); span != nil { + statusContext = opentracing.ContextWithSpan(statusContext, span) + } + + s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create session") + } + + if len(syncedDirs) > 0 { + s.Allow(filesync.NewFSSyncProvider(syncedDirs)) + } + + for _, a := range opt.Session { + s.Allow(a) + } + + switch opt.Exporter { + case ExporterLocal: + if opt.ExporterOutput != nil { + return nil, errors.New("output file writer is not supported by local exporter") + } + if opt.ExporterOutputDir == "" { + return nil, errors.New("output directory is required for local exporter") + } + s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir)) + case ExporterOCI, ExporterDocker: + if opt.ExporterOutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) + } + if opt.ExporterOutput == nil { + return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter) + } + s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput)) + default: + if opt.ExporterOutput != nil { + return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter) + } + if opt.ExporterOutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) + } + } + + eg.Go(func() error { + return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) + }) + + var res *SolveResponse + eg.Go(func() error { + defer func() { // make sure the Status ends cleanly on build errors + go func() { + <-time.After(3 * time.Second) + cancelStatus() + }() + logrus.Debugf("stopping session") + s.Close() + }() + var pbd *pb.Definition + if def != nil { + pbd = def.ToPB() + } + resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ + Ref: ref, + Definition: pbd, + Exporter: opt.Exporter, + ExporterAttrs: opt.ExporterAttrs, + Session: s.ID(), + Frontend: opt.Frontend, + FrontendAttrs: opt.FrontendAttrs, + Cache: controlapi.CacheOptions{ + ExportRef: opt.ExportCache, + ImportRefs: opt.ImportCache, + ExportAttrs: opt.ExportCacheAttrs, + }, + }) + if err != nil { + return errors.Wrap(err, "failed to solve") + } + res = &SolveResponse{ + ExporterResponse: resp.ExporterResponse, + } + return nil + }) + + eg.Go(func() error { + stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return errors.Wrap(err, "failed to get status") + } + for { + resp, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return errors.Wrap(err, "failed to receive status") + } + s := SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, 
&Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + if statusChan != nil { + statusChan <- &s + } + } + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + return res, nil +} + +func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { + for _, d := range localDirs { + fi, err := os.Stat(d) + if err != nil { + return nil, errors.Wrapf(err, "could not find %s", d) + } + if !fi.IsDir() { + return nil, errors.Errorf("%s not a directory", d) + } + } + dirs := make([]filesync.SyncedDir, 0, len(localDirs)) + if def == nil { + for name, d := range localDirs { + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) + } + } else { + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property + name := strings.TrimPrefix(src.Identifier, "local://") + d, ok := localDirs[name] + if !ok { + return nil, errors.Errorf("local directory %s not enabled", name) + } + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes + } + } + } + } + return dirs, nil +} + +func defaultSessionName() string { + wd, err := os.Getwd() + if err != nil { + return "unknown" + } + return filepath.Base(wd) +} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go new file mode 100644 index 0000000000..b4ccb82d4c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/workers.go @@ -0,0 +1,49 @@ +package client + +import ( + "context" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +type WorkerInfo struct { + ID string + Labels map[string]string +} + +func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) { + info := &ListWorkersInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.ListWorkersRequest{Filter: info.Filter} + resp, err := c.controlClient().ListWorkers(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to list workers") + } + + var wi []*WorkerInfo + + for _, w := range resp.Record { + wi = append(wi, &WorkerInfo{ + ID: w.ID, + Labels: w.Labels, + }) + } + + return wi, nil +} + +type ListWorkersOption func(*ListWorkersInfo) + +type ListWorkersInfo struct { + Filter []string +} + +func WithWorkerFilter(f []string) ListWorkersOption { + return func(wi *ListWorkersInfo) { + wi.Filter = f + } +} diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go new file mode 100644 index 0000000000..ac9060501c --- /dev/null +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -0,0 +1,292 @@ +package control + +import ( + "context" + + "github.com/docker/distribution/reference" + controlapi "github.com/moby/buildkit/api/services/control" + 
"github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" +) + +type Opt struct { + SessionManager *session.Manager + WorkerController *worker.Controller + Frontends map[string]frontend.Frontend + CacheKeyStorage solver.CacheKeyStorage + CacheExporter *remotecache.CacheExporter + CacheImporter *remotecache.CacheImporter +} + +type Controller struct { // TODO: ControlService + opt Opt + solver *llbsolver.Solver +} + +func NewController(opt Opt) (*Controller, error) { + solver := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.CacheImporter) + + c := &Controller{ + opt: opt, + solver: solver, + } + return c, nil +} + +func (c *Controller) Register(server *grpc.Server) error { + controlapi.RegisterControlServer(server, c) + return nil +} + +func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { + resp := &controlapi.DiskUsageResponse{} + workers, err := c.opt.WorkerController.List() + if err != nil { + return nil, err + } + for _, w := range workers { + du, err := w.DiskUsage(ctx, client.DiskUsageInfo{ + Filter: r.Filter, + }) + if err != nil { + return nil, err + } + + for _, r := range du { + resp.Record = append(resp.Record, &controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parent: r.Parent, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + }) + } + } + return resp, nil +} + +func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error { + ch := make(chan client.UsageInfo) + + eg, ctx := errgroup.WithContext(stream.Context()) + workers, err := c.opt.WorkerController.List() + if err != nil { + return errors.Wrap(err, "failed to list workers for prune") + } + + for _, w := range workers { + func(w worker.Worker) { + eg.Go(func() error { + return w.Prune(ctx, ch) + }) + }(w) + } + + eg2, ctx := errgroup.WithContext(stream.Context()) + + eg2.Go(func() error { + defer close(ch) + return eg.Wait() + }) + + eg2.Go(func() error { + for r := range ch { + if err := stream.Send(&controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parent: r.Parent, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + }); err != nil { + return err + } + } + return nil + }) + + return eg2.Wait() +} + +func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { + ctx = session.NewContext(ctx, req.Session) + + var expi exporter.ExporterInstance + // TODO: multiworker + // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. 
+ w, err := c.opt.WorkerController.GetDefault() + if err != nil { + return nil, err + } + if req.Exporter != "" { + exp, err := w.Exporter(req.Exporter) + if err != nil { + return nil, err + } + expi, err = exp.Resolve(ctx, req.ExporterAttrs) + if err != nil { + return nil, err + } + } + + var cacheExporter *remotecache.RegistryCacheExporter + if ref := req.Cache.ExportRef; ref != "" { + parsed, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + exportCacheRef := reference.TagNameOnly(parsed).String() + cacheExporter = c.opt.CacheExporter.ExporterForTarget(exportCacheRef) + } + + var importCacheRefs []string + for _, ref := range req.Cache.ImportRefs { + parsed, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + importCacheRefs = append(importCacheRefs, reference.TagNameOnly(parsed).String()) + } + + resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{ + Frontend: req.Frontend, + Definition: req.Definition, + FrontendOpt: req.FrontendAttrs, + ImportCacheRefs: importCacheRefs, + }, llbsolver.ExporterRequest{ + Exporter: expi, + CacheExporter: cacheExporter, + CacheExportMode: parseCacheExporterOpt(req.Cache.ExportAttrs), + }) + if err != nil { + return nil, err + } + return &controlapi.SolveResponse{ + ExporterResponse: resp.ExporterResponse, + }, nil +} + +func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { + ch := make(chan *client.SolveStatus, 8) + + eg, ctx := errgroup.WithContext(stream.Context()) + eg.Go(func() error { + return c.solver.Status(ctx, req.Ref, ch) + }) + + eg.Go(func() error { + for { + ss, ok := <-ch + if !ok { + return nil + } + sr := controlapi.StatusResponse{} + for _, v := range ss.Vertexes { + sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range ss.Statuses { + sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Current: v.Current, + Total: v.Total, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range ss.Logs { + sr.Logs = append(sr.Logs, &controlapi.VertexLog{ + Vertex: v.Vertex, + Stream: int64(v.Stream), + Msg: v.Data, + Timestamp: v.Timestamp, + }) + } + if err := stream.SendMsg(&sr); err != nil { + return err + } + } + }) + + return eg.Wait() +} + +func (c *Controller) Session(stream controlapi.Control_SessionServer) error { + logrus.Debugf("session started") + conn, closeCh, opts := grpchijack.Hijack(stream) + defer conn.Close() + + ctx, cancel := context.WithCancel(stream.Context()) + go func() { + <-closeCh + cancel() + }() + + err := c.opt.SessionManager.HandleConn(ctx, conn, opts) + logrus.Debugf("session finished: %v", err) + return err +} + +func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) { + resp := &controlapi.ListWorkersResponse{} + workers, err := c.opt.WorkerController.List(r.Filter...) 
+	if err != nil {
+		return nil, err
+	}
+	for _, w := range workers {
+		resp.Record = append(resp.Record, &controlapi.WorkerRecord{
+			ID:     w.ID(),
+			Labels: w.Labels(),
+		})
+	}
+	return resp, nil
+}
+
+func parseCacheExporterOpt(opt map[string]string) solver.CacheExportMode {
+	for k, v := range opt {
+		switch k {
+		case "mode":
+			switch v {
+			case "min":
+				return solver.CacheExportModeMin
+			case "max":
+				return solver.CacheExportModeMax
+			default:
+				logrus.Debugf("skipping invalid cache export mode: %s", v)
+			}
+		default:
+			logrus.Warnf("skipping invalid cache export opt: %s", v)
+		}
+	}
+	return solver.CacheExportModeMin
+}
diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go
new file mode 100644
index 0000000000..6c89792626
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/executor.go
@@ -0,0 +1,30 @@
+package executor
+
+import (
+	"context"
+	"io"
+
+	"github.com/moby/buildkit/cache"
+)
+
+type Meta struct {
+	Args           []string
+	Env            []string
+	User           string
+	Cwd            string
+	Tty            bool
+	ReadonlyRootFS bool
+	// DisableNetworking bool
+}
+
+type Mount struct {
+	Src      cache.Mountable
+	Selector string
+	Dest     string
+	Readonly bool
+}
+
+type Executor interface {
+	// TODO: add stdout/err
+	Exec(ctx context.Context, meta Meta, rootfs cache.Mountable, mounts []Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
+}
diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go
new file mode 100644
index 0000000000..b7be99ccf3
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go
@@ -0,0 +1,38 @@
+package oci
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+const hostsContent = `
+127.0.0.1	localhost
+::1	localhost ip6-localhost ip6-loopback
+`
+
+func GetHostsFile(ctx context.Context, stateDir string) (string, error) {
+	p := filepath.Join(stateDir, "hosts")
+	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
+		_, err := os.Stat(p)
+		if err == nil {
+			return "", nil
+		}
+		if !os.IsNotExist(err) {
+			return "", err
+		}
+		if err := ioutil.WriteFile(p+".tmp", []byte(hostsContent), 0644); err != nil {
+			return "", err
+		}
+
+		if err := os.Rename(p+".tmp", p); err != nil {
+			return "", err
+		}
+		return "", nil
+	})
+	if err != nil {
+		return "", err
+	}
+	return p, nil
+}
diff --git a/vendor/github.com/moby/buildkit/executor/oci/mounts.go b/vendor/github.com/moby/buildkit/executor/oci/mounts.go
new file mode 100644
index 0000000000..a0fe8a9f92
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/mounts.go
@@ -0,0 +1,68 @@
+package oci
+
+import (
+	"context"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// MountOpts sets OCI spec specific info for mount points
+type MountOpts func([]specs.Mount) []specs.Mount
+
+// GetMounts returns the default mounts required by buildkit
+// https://github.com/moby/buildkit/issues/429
+func GetMounts(ctx context.Context, mountOpts ...MountOpts) []specs.Mount {
+	mounts := []specs.Mount{
+		{
+			Destination: "/proc",
+			Type:        "proc",
+			Source:      "proc",
+		},
+		{
+			Destination: "/dev",
+			Type:        "tmpfs",
+			Source:      "tmpfs",
+			Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+		},
+		{
+			Destination: "/dev/pts",
+			Type:        "devpts",
+			Source:      "devpts",
+			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+		},
+		{
+			Destination: "/dev/shm",
+			Type:        "tmpfs",
+			Source:      "shm",
+			Options:
[]string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + } + for _, o := range mountOpts { + mounts = o(mounts) + } + return mounts +} + +func withROBind(src, dest string) func(m []specs.Mount) []specs.Mount { + return func(m []specs.Mount) []specs.Mount { + m = append(m, specs.Mount{ + Destination: dest, + Type: "bind", + Source: src, + Options: []string{"rbind", "ro"}, + }) + return m + } +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go new file mode 100644 index 0000000000..f22eceed22 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go @@ -0,0 +1,81 @@ +package oci + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/libnetwork/resolvconf" + "github.com/moby/buildkit/util/flightcontrol" +) + +var g flightcontrol.Group +var notFirstRun bool +var lastNotEmpty bool + +func GetResolvConf(ctx context.Context, stateDir string) (string, error) { + p := filepath.Join(stateDir, "resolv.conf") + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + generate := !notFirstRun + notFirstRun = true + + if !generate { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + generate = true + } + if !generate { + fiMain, err := os.Stat("/etc/resolv.conf") + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + if lastNotEmpty { + generate = true + lastNotEmpty = false + } + } else { + if fi.ModTime().Before(fiMain.ModTime()) { + generate = true + } + } + } + } + + if !generate { + return "", nil + } + + var dt []byte + f, err := resolvconf.Get() + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + } else { + dt = f.Content + } + + f, err = resolvconf.FilterResolvDNS(dt, true) + if err != nil { + return "", err + } + + if err := ioutil.WriteFile(p+".tmp", f.Content, 0644); err != nil { + return "", err + } + + if err := os.Rename(p+".tmp", p); err != nil { + return "", err + } + return "", nil + }) + if err != nil { + return "", err + } + return p, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go new file mode 100644 index 0000000000..c628b5cab1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go @@ -0,0 +1,163 @@ +// +build !windows + +package oci + +import ( + "context" + "path" + "sync" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/oci" + "github.com/mitchellh/hashstructure" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/snapshot" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// Ideally we don't have to import whole containerd just for the default spec + +// GenerateSpec generates spec using containerd functionality. 
+func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
+	c := &containers.Container{
+		ID: id,
+	}
+	_, ok := namespaces.Namespace(ctx)
+	if !ok {
+		ctx = namespaces.WithNamespace(ctx, "buildkit")
+	}
+
+	opts = append(opts,
+		oci.WithHostNamespace(specs.NetworkNamespace),
+	)
+
+	// Note that containerd.GenerateSpec is namespaced so as to make
+	// specs.Linux.CgroupsPath namespaced
+	s, err := oci.GenerateSpec(ctx, nil, c, opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	s.Process.Args = meta.Args
+	s.Process.Env = meta.Env
+	s.Process.Cwd = meta.Cwd
+
+	s.Mounts = GetMounts(ctx,
+		withROBind(resolvConf, "/etc/resolv.conf"),
+		withROBind(hostsFile, "/etc/hosts"),
+	)
+	// TODO: User
+
+	sm := &submounts{}
+
+	var releasers []func() error
+	releaseAll := func() {
+		sm.cleanup()
+		for _, f := range releasers {
+			f()
+		}
+	}
+
+	for _, m := range mounts {
+		if m.Src == nil {
+			return nil, nil, errors.Errorf("mount %s has no source", m.Dest)
+		}
+		mountable, err := m.Src.Mount(ctx, m.Readonly)
+		if err != nil {
+			releaseAll()
+			return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest)
+		}
+		mounts, err := mountable.Mount()
+		if err != nil {
+			releaseAll()
+			return nil, nil, errors.WithStack(err)
+		}
+		releasers = append(releasers, mountable.Release)
+		for _, mount := range mounts {
+			mount, err = sm.subMount(mount, m.Selector)
+			if err != nil {
+				releaseAll()
+				return nil, nil, err
+			}
+			s.Mounts = append(s.Mounts, specs.Mount{
+				Destination: m.Dest,
+				Type:        mount.Type,
+				Source:      mount.Source,
+				Options:     mount.Options,
+			})
+		}
+	}
+
+	return s, releaseAll, nil
+}
+
+type mountRef struct {
+	mount   mount.Mount
+	unmount func() error
+}
+
+type submounts struct {
+	m map[uint64]mountRef
+}
+
+func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) {
+	if path.Join("/", subPath) == "/" {
+		return m, nil
+	}
+	if s.m == nil {
+		s.m = map[uint64]mountRef{}
+	}
+	h, err := hashstructure.Hash(m, nil)
+	if err != nil {
+		return mount.Mount{}, err
+	}
+	if mr, ok := s.m[h]; ok {
+		return sub(mr.mount, subPath), nil
+	}
+
+	lm := snapshot.LocalMounterWithMounts([]mount.Mount{m})
+
+	mp, err := lm.Mount()
+	if err != nil {
+		return mount.Mount{}, err
+	}
+
+	opts := []string{"rbind"}
+	for _, opt := range m.Options {
+		if opt == "ro" {
+			opts = append(opts, opt)
+		}
+	}
+
+	s.m[h] = mountRef{
+		mount: mount.Mount{
+			Source:  mp,
+			Type:    "bind",
+			Options: opts,
+		},
+		unmount: lm.Unmount,
+	}
+
+	return sub(s.m[h].mount, subPath), nil
+}
+
+func (s *submounts) cleanup() {
+	var wg sync.WaitGroup
+	wg.Add(len(s.m))
+	for _, m := range s.m {
+		func(m mountRef) {
+			go func() {
+				m.unmount()
+				wg.Done()
+			}()
+		}(m)
+	}
+	wg.Wait()
+}
+
+func sub(m mount.Mount, subPath string) mount.Mount {
+	m.Source = path.Join(m.Source, subPath)
+	return m
+}
diff --git a/vendor/github.com/moby/buildkit/executor/oci/user.go b/vendor/github.com/moby/buildkit/executor/oci/user.go
new file mode 100644
index 0000000000..ce755f18a2
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/user.go
@@ -0,0 +1,86 @@
+package oci
+
+import (
+	"context"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/continuity/fs"
+	"github.com/opencontainers/runc/libcontainer/user"
+)
+
+func GetUser(ctx context.Context, root, username string) (uint32, uint32, error) {
+	// fast path from uid/gid
+	if uid, gid, err := ParseUser(username); err == nil {
+		return uid, gid, nil
+ } + + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, err + } + passwdFile, err := openUserFile(root, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := openUserFile(root, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, err + } + + return uint32(execUser.Uid), uint32(execUser.Gid), nil +} + +func ParseUser(str string) (uid uint32, gid uint32, err error) { + if str == "" { + return 0, 0, nil + } + parts := strings.SplitN(str, ":", 2) + for i, v := range parts { + switch i { + case 0: + uid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + if len(parts) == 1 { + gid = uid + } + case 1: + gid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + } + } + return +} + +func openUserFile(root, p string) (*os.File, error) { + p, err := fs.RootPath(root, p) + if err != nil { + return nil, err + } + return os.Open(p) +} + +func parseUID(str string) (uint32, error) { + if str == "root" { + return 0, nil + } + uid, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(uid), nil +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go new file mode 100644 index 0000000000..edffb5bf58 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -0,0 +1,249 @@ +package runcexecutor + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/containerd/containerd/contrib/seccomp" + "github.com/containerd/containerd/mount" + containerdoci "github.com/containerd/containerd/oci" + "github.com/containerd/continuity/fs" + runc "github.com/containerd/go-runc" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/libcontainer_specconv" + "github.com/moby/buildkit/util/system" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type Opt struct { + // root directory + Root string + CommandCandidates []string + // without root privileges (has nothing to do with Opt.Root directory) + Rootless bool +} + +var defaultCommandCandidates = []string{"buildkit-runc", "runc"} + +type runcExecutor struct { + runc *runc.Runc + root string + cmd string + rootless bool +} + +func New(opt Opt) (executor.Executor, error) { + cmds := opt.CommandCandidates + if cmds == nil { + cmds = defaultCommandCandidates + } + + var cmd string + var found bool + for _, cmd = range cmds { + if _, err := exec.LookPath(cmd); err == nil { + found = true + break + } + } + if !found { + return nil, errors.Errorf("failed to find %s binary", cmd) + } + + root := opt.Root + + if err := os.MkdirAll(root, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to create %s", root) + } + + root, err := filepath.Abs(root) + if err != nil { + return nil, err + } + root, err = filepath.EvalSymlinks(root) + if err != nil { + return nil, err + } + + runtime := &runc.Runc{ + Command: cmd, + Log: filepath.Join(root, "runc-log.json"), + LogFormat: runc.JSON, + PdeathSignal: syscall.SIGKILL, + Setpgid: true, + } + + w := &runcExecutor{ + runc: runtime, 
+ root: root, + rootless: opt.Rootless, + } + return w, nil +} + +func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { + + resolvConf, err := oci.GetResolvConf(ctx, w.root) + if err != nil { + return err + } + + hostsFile, err := oci.GetHostsFile(ctx, w.root) + if err != nil { + return err + } + + mountable, err := root.Mount(ctx, false) + if err != nil { + return err + } + + rootMount, err := mountable.Mount() + if err != nil { + return err + } + defer mountable.Release() + + id := identity.NewID() + bundle := filepath.Join(w.root, id) + + if err := os.Mkdir(bundle, 0700); err != nil { + return err + } + defer os.RemoveAll(bundle) + rootFSPath := filepath.Join(bundle, "rootfs") + if err := os.Mkdir(rootFSPath, 0700); err != nil { + return err + } + if err := mount.All(rootMount, rootFSPath); err != nil { + return err + } + defer mount.Unmount(rootFSPath, 0) + + uid, gid, err := oci.GetUser(ctx, rootFSPath, meta.User) + if err != nil { + return err + } + + f, err := os.Create(filepath.Join(bundle, "config.json")) + if err != nil { + return err + } + defer f.Close() + opts := []containerdoci.SpecOpts{containerdoci.WithUIDGID(uid, gid)} + if system.SeccompSupported() { + opts = append(opts, seccomp.WithDefaultProfile()) + } + if meta.ReadonlyRootFS { + opts = append(opts, containerdoci.WithRootFSReadonly()) + } + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, opts...) + if err != nil { + return err + } + defer cleanup() + + spec.Root.Path = rootFSPath + if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type + spec.Root.Readonly = true + } + + newp, err := fs.RootPath(rootFSPath, meta.Cwd) + if err != nil { + return errors.Wrapf(err, "working dir %s points to invalid target", newp) + } + if err := os.MkdirAll(newp, 0700); err != nil { + return errors.Wrapf(err, "failed to create working directory %s", newp) + } + + if w.rootless { + specconv.ToRootless(spec, &specconv.RootlessOpts{ + MapSubUIDGID: true, + }) + // TODO(AkihiroSuda): keep Cgroups enabled if /sys/fs/cgroup/cpuset/buildkit exists and writable + spec.Linux.CgroupsPath = "" + // TODO(AkihiroSuda): ToRootless removes netns, but we should readd netns here + // if either SUID or userspace NAT is configured on the host. 
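+		// A rootless runc cannot lower the container's oom_score_adj
+		// below its own (that needs CAP_SYS_RESOURCE), so propagate the
+		// current value explicitly; see setOOMScoreAdj below.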
+ if err := setOOMScoreAdj(spec); err != nil { + return err + } + } + + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + logrus.Debugf("> running %s %v", id, meta.Args) + + status, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ + IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr}, + }) + logrus.Debugf("< completed %s %v %v", id, status, err) + if status != 0 { + select { + case <-ctx.Done(): + // runc can't report context.Cancelled directly + return errors.Wrapf(ctx.Err(), "exit code %d", status) + default: + } + return errors.Errorf("exit code %d", status) + } + + return err +} + +type forwardIO struct { + stdin io.ReadCloser + stdout, stderr io.WriteCloser +} + +func (s *forwardIO) Close() error { + return nil +} + +func (s *forwardIO) Set(cmd *exec.Cmd) { + cmd.Stdin = s.stdin + cmd.Stdout = s.stdout + cmd.Stderr = s.stderr +} + +func (s *forwardIO) Stdin() io.WriteCloser { + return nil +} + +func (s *forwardIO) Stdout() io.ReadCloser { + return nil +} + +func (s *forwardIO) Stderr() io.ReadCloser { + return nil +} + +// setOOMScoreAdj comes from https://github.com/genuinetools/img/blob/2fabe60b7dc4623aa392b515e013bbc69ad510ab/executor/runc/executor.go#L182-L192 +func setOOMScoreAdj(spec *specs.Spec) error { + // Set the oom_score_adj of our children containers to that of the current process. + b, err := ioutil.ReadFile("/proc/self/oom_score_adj") + if err != nil { + return errors.Wrap(err, "failed to read /proc/self/oom_score_adj") + } + s := strings.TrimSpace(string(b)) + oom, err := strconv.Atoi(s) + if err != nil { + return errors.Wrapf(err, "failed to parse %s as int", s) + } + spec.Process.OOMScoreAdj = &oom + return nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/exporter.go b/vendor/github.com/moby/buildkit/exporter/exporter.go new file mode 100644 index 0000000000..48c6a3bc0d --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/exporter.go @@ -0,0 +1,16 @@ +package exporter + +import ( + "context" + + "github.com/moby/buildkit/cache" +) + +type Exporter interface { + Resolve(context.Context, map[string]string) (ExporterInstance, error) +} + +type ExporterInstance interface { + Name() string + Export(context.Context, cache.ImmutableRef, map[string][]byte) (map[string]string, error) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go new file mode 100644 index 0000000000..9aee2a778d --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -0,0 +1,276 @@ +package builder + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "regexp" + "strings" + + "github.com/docker/docker/builder/dockerignore" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const ( + LocalNameContext = "context" + LocalNameDockerfile = "dockerfile" + keyTarget = "target" + keyFilename = "filename" + keyCacheFrom = "cache-from" + exporterImageConfig = "containerimage.config" + defaultDockerfileName = "Dockerfile" + dockerignoreFilename = ".dockerignore" + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + keyNoCache = "no-cache" +) + +var httpPrefix = regexp.MustCompile("^https?://") +var gitUrlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") + +func Build(ctx context.Context, c client.Client) error { + opts := 
c.Opts() + + filename := opts[keyFilename] + if filename == "" { + filename = defaultDockerfileName + } + + var ignoreCache []string + if v, ok := opts[keyNoCache]; ok { + if v == "" { + ignoreCache = []string{} // means all stages + } else { + ignoreCache = strings.Split(v, ",") + } + } + + src := llb.Local(LocalNameDockerfile, + llb.IncludePatterns([]string{filename}), + llb.SessionID(c.SessionID()), + llb.SharedKeyHint(defaultDockerfileName), + ) + var buildContext *llb.State + isScratchContext := false + if st, ok := detectGitContext(opts[LocalNameContext]); ok { + src = *st + buildContext = &src + } else if httpPrefix.MatchString(opts[LocalNameContext]) { + httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context")) + def, err := httpContext.Marshal() + if err != nil { + return err + } + ref, err := c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }, nil, false) + if err != nil { + return err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: "context", + Range: &client.FileRange{ + Length: 1024, + }, + }) + if err != nil { + return err + } + if isArchive(dt) { + unpack := llb.Image(dockerfile2llb.CopyImage). + Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS()) + unpack.AddMount("/src", httpContext, llb.Readonly) + src = unpack.AddMount("/out", llb.Scratch()) + buildContext = &src + } else { + filename = "context" + src = httpContext + buildContext = &src + isScratchContext = true + } + } + + def, err := src.Marshal() + if err != nil { + return err + } + + eg, ctx2 := errgroup.WithContext(ctx) + var dtDockerfile []byte + eg.Go(func() error { + ref, err := c.Solve(ctx2, client.SolveRequest{ + Definition: def.ToPB(), + }, nil, false) + if err != nil { + return err + } + + dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ + Filename: filename, + }) + if err != nil { + return err + } + return nil + }) + var excludes []string + if !isScratchContext { + eg.Go(func() error { + dockerignoreState := buildContext + if dockerignoreState == nil { + st := llb.Local(LocalNameContext, + llb.SessionID(c.SessionID()), + llb.IncludePatterns([]string{dockerignoreFilename}), + llb.SharedKeyHint(dockerignoreFilename), + ) + dockerignoreState = &st + } + def, err := dockerignoreState.Marshal() + if err != nil { + return err + } + ref, err := c.Solve(ctx2, client.SolveRequest{ + Definition: def.ToPB(), + }, nil, false) + if err != nil { + return err + } + dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{ + Filename: dockerignoreFilename, + }) + if err == nil { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) + if err != nil { + return errors.Wrap(err, "failed to parse dockerignore") + } + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if _, ok := c.Opts()["cmdline"]; !ok { + ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) + if ok { + return forwardGateway(ctx, c, ref, cmdline) + } + } + + st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ + Target: opts[keyTarget], + MetaResolver: c, + BuildArgs: filter(opts, buildArgPrefix), + Labels: filter(opts, labelPrefix), + SessionID: c.SessionID(), + BuildContext: buildContext, + Excludes: excludes, + IgnoreCache: ignoreCache, + }) + + if err != nil { + return err + } + + def, err = st.Marshal() + if err != nil { + return err + } + + config, err := json.Marshal(img) + if err != nil { + return err + } + + var cacheFrom []string + if cacheFromStr := 
opts[keyCacheFrom]; cacheFromStr != "" { + cacheFrom = strings.Split(cacheFromStr, ",") + } + + _, err = c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + ImportCacheRefs: cacheFrom, + }, map[string][]byte{ + exporterImageConfig: config, + }, true) + if err != nil { + return err + } + return nil +} + +func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) error { + opts := c.Opts() + if opts == nil { + opts = map[string]string{} + } + opts["cmdline"] = cmdline + opts["source"] = ref + _, err := c.Solve(ctx, client.SolveRequest{ + Frontend: "gateway.v0", + FrontendOpt: opts, + }, nil, true) + return err +} + +func filter(opt map[string]string, key string) map[string]string { + m := map[string]string{} + for k, v := range opt { + if strings.HasPrefix(k, key) { + m[strings.TrimPrefix(k, key)] = v + } + } + return m +} + +func detectGitContext(ref string) (*llb.State, bool) { + found := false + if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) { + found = true + } + + for _, prefix := range []string{"git://", "github.com/", "git@"} { + if strings.HasPrefix(ref, prefix) { + found = true + break + } + } + if !found { + return nil, false + } + + parts := strings.SplitN(ref, "#", 2) + branch := "" + if len(parts) > 1 { + branch = parts[1] + } + st := llb.Git(parts[0], branch) + return &st, true +} + +func isArchive(header []byte) bool { + for _, m := range [][]byte{ + {0x42, 0x5A, 0x68}, // bzip2 + {0x1F, 0x8B, 0x08}, // gzip + {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz + } { + if len(header) < len(m) { + continue + } + if bytes.Equal(m, header[:len(m)]) { + return true + } + } + + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go new file mode 100644 index 0000000000..7c0e192318 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go @@ -0,0 +1,41 @@ +package dockerfile + +import ( + "context" + + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/dockerfile/builder" + "github.com/moby/buildkit/solver" +) + +func NewDockerfileFrontend() frontend.Frontend { + return &dfFrontend{} +} + +type dfFrontend struct{} + +func (f *dfFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) { + + c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts) + if err != nil { + return nil, nil, err + } + + defer func() { + for _, r := range c.refs { + if r != nil && (c.final != r || retErr != nil) { + r.Release(context.TODO()) + } + } + }() + + if err := builder.Build(ctx, c); err != nil { + return nil, nil, err + } + + if c.final == nil || c.final.CachedResult == nil { + return nil, c.exporterAttr, nil + } + + return c.final, c.exporterAttr, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go new file mode 100644 index 0000000000..756d7e1605 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -0,0 +1,932 @@ +package dockerfile2llb + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + 
"github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/client/llb/imagemetaresolver" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const ( + emptyImageName = "scratch" + localNameContext = "context" + historyComment = "buildkit.dockerfile.v0" + + CopyImage = "tonistiigi/copy:v0.1.3@sha256:87c46e7b413cdd2c2702902b481b390ce263ac9d942253d366f3b1a3c16f96d6" +) + +type ConvertOpt struct { + Target string + MetaResolver llb.ImageMetaResolver + BuildArgs map[string]string + Labels map[string]string + SessionID string + BuildContext *llb.State + Excludes []string + // IgnoreCache contains names of the stages that should not use build cache. + // Empty slice means ignore cache for all stages. Nil doesn't disable cache. + IgnoreCache []string + // CacheIDNamespace scopes the IDs for different cache mounts + CacheIDNamespace string +} + +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { + if len(dt) == 0 { + return nil, nil, errors.Errorf("the Dockerfile cannot be empty") + } + + dockerfile, err := parser.Parse(bytes.NewReader(dt)) + if err != nil { + return nil, nil, err + } + + proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs) + + stages, metaArgs, err := instructions.Parse(dockerfile.AST) + if err != nil { + return nil, nil, err + } + + for i := range metaArgs { + metaArgs[i] = setBuildArgValue(metaArgs[i], opt.BuildArgs) + } + + shlex := shell.NewLex(dockerfile.EscapeToken) + + metaResolver := opt.MetaResolver + if metaResolver == nil { + metaResolver = imagemetaresolver.Default() + } + + var allDispatchStates []*dispatchState + dispatchStatesByName := map[string]*dispatchState{} + + // set base state for every image + for _, st := range stages { + name, err := shlex.ProcessWord(st.BaseName, toEnvList(metaArgs, nil)) + if err != nil { + return nil, nil, err + } + st.BaseName = name + + ds := &dispatchState{ + stage: st, + deps: make(map[*dispatchState]struct{}), + ctxPaths: make(map[string]struct{}), + } + if d, ok := dispatchStatesByName[st.BaseName]; ok { + ds.base = d + } + allDispatchStates = append(allDispatchStates, ds) + if st.Name != "" { + dispatchStatesByName[strings.ToLower(st.Name)] = ds + } + if opt.IgnoreCache != nil { + if len(opt.IgnoreCache) == 0 { + ds.ignoreCache = true + } else if st.Name != "" { + for _, n := range opt.IgnoreCache { + if strings.EqualFold(n, st.Name) { + ds.ignoreCache = true + } + } + } + } + } + + var target *dispatchState + if opt.Target == "" { + target = allDispatchStates[len(allDispatchStates)-1] + } else { + var ok bool + target, ok = dispatchStatesByName[strings.ToLower(opt.Target)] + if !ok { + return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + } + } + + // fill dependencies to stages so unreachable ones can avoid loading image configs + for _, d := range allDispatchStates { + d.commands = make([]command, len(d.stage.Commands)) + for i, cmd := range d.stage.Commands { + newCmd, err := toCommand(cmd, dispatchStatesByName, allDispatchStates) + if err != nil { + return nil, nil, err + } + d.commands[i] = newCmd + for _, src := range newCmd.sources { + if src != nil { + d.deps[src] = struct{}{} + if src.unregistered { + allDispatchStates = 
append(allDispatchStates, src) + } + } + } + } + } + + eg, ctx := errgroup.WithContext(ctx) + for i, d := range allDispatchStates { + reachable := isReachable(target, d) + // resolve image config for every stage + if d.base == nil { + if d.stage.BaseName == emptyImageName { + d.state = llb.Scratch() + d.image = emptyImage() + continue + } + func(i int, d *dispatchState) { + eg.Go(func() error { + ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) + if err != nil { + return err + } + d.stage.BaseName = reference.TagNameOnly(ref).String() + var isScratch bool + if metaResolver != nil && reachable { + dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName) + if err == nil { // handle the error while builder is actually running + var img Image + if err := json.Unmarshal(dt, &img); err != nil { + return err + } + img.Created = nil + d.image = img + if dgst != "" { + ref, err = reference.WithDigest(ref, dgst) + if err != nil { + return err + } + } + d.stage.BaseName = ref.String() + _ = ref + if len(img.RootFS.DiffIDs) == 0 { + isScratch = true + } + } + } + if isScratch { + d.state = llb.Scratch() + } else { + d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode)) + } + return nil + }) + }(i, d) + } + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + buildContext := &mutableOutput{} + ctxPaths := map[string]struct{}{} + + for _, d := range allDispatchStates { + if !isReachable(target, d) { + continue + } + if d.base != nil { + d.state = d.base.state + d.image = clone(d.base.image) + } + + // initialize base metadata from image conf + for _, env := range d.image.Config.Env { + parts := strings.SplitN(env, "=", 2) + v := "" + if len(parts) > 1 { + v = parts[1] + } + if err := dispatchEnv(d, &instructions.EnvCommand{Env: []instructions.KeyValuePair{{Key: parts[0], Value: v}}}, false); err != nil { + return nil, nil, err + } + } + if d.image.Config.WorkingDir != "" { + if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil { + return nil, nil, err + } + } + if d.image.Config.User != "" { + if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { + return nil, nil, err + } + } + + opt := dispatchOpt{ + allDispatchStates: allDispatchStates, + dispatchStatesByName: dispatchStatesByName, + metaArgs: metaArgs, + buildArgValues: opt.BuildArgs, + shlex: shlex, + sessionID: opt.SessionID, + buildContext: llb.NewState(buildContext), + proxyEnv: proxyEnv, + cacheIDNamespace: opt.CacheIDNamespace, + } + + if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil { + return nil, nil, err + } + + for _, cmd := range d.commands { + if err := dispatch(d, cmd, opt); err != nil { + return nil, nil, err + } + } + + for p := range d.ctxPaths { + ctxPaths[p] = struct{}{} + } + } + + if len(opt.Labels) != 0 && target.image.Config.Labels == nil { + target.image.Config.Labels = make(map[string]string, len(opt.Labels)) + } + for k, v := range opt.Labels { + target.image.Config.Labels[k] = v + } + + opts := []llb.LocalOption{ + llb.SessionID(opt.SessionID), + llb.ExcludePatterns(opt.Excludes), + llb.SharedKeyHint(localNameContext), + } + if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { + opts = append(opts, llb.FollowPaths(includePatterns)) + } + bc := llb.Local(localNameContext, opts...) 
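The ctxPaths map populated by the COPY/ADD dispatch handlers is collapsed by normalizeContextPaths (defined further down in this file) before being passed as llb.FollowPaths include patterns, so only the referenced parts of the build context are transferred from the client. A minimal standalone sketch of that normalization, slightly simplified and reimplemented here with hypothetical paths for illustration:

package main

import (
	"fmt"
	"path"
	"sort"
	"strings"
)

// normalizeContextPaths mirrors the helper defined later in convert.go:
// "/" disables narrowing entirely, and any path already covered by a
// parent directory is dropped.
func normalizeContextPaths(paths map[string]struct{}) []string {
	pathSlice := make([]string, 0, len(paths))
	for p := range paths {
		if p == "/" {
			return nil
		}
		pathSlice = append(pathSlice, p)
	}
	for i := range pathSlice {
		for j := range pathSlice {
			if i != j && strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") {
				delete(paths, pathSlice[j])
			}
		}
	}
	out := make([]string, 0, len(paths))
	for p := range paths {
		out = append(out, path.Join(".", p))
	}
	sort.Strings(out)
	return out
}

func main() {
	// e.g. COPY app /app, COPY app/static /static, ADD vendor.tar /
	fmt.Println(normalizeContextPaths(map[string]struct{}{
		"/app": {}, "/app/static": {}, "/vendor.tar": {},
	})) // [app vendor.tar]
}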
+ if opt.BuildContext != nil { + bc = *opt.BuildContext + } + buildContext.Output = bc.Output() + + return &target.state, &target.image, nil +} + +func toCommand(ic instructions.Command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) (command, error) { + cmd := command{Command: ic} + if c, ok := ic.(*instructions.CopyCommand); ok { + if c.From != "" { + var stn *dispatchState + index, err := strconv.Atoi(c.From) + if err != nil { + stn, ok = dispatchStatesByName[strings.ToLower(c.From)] + if !ok { + stn = &dispatchState{ + stage: instructions.Stage{BaseName: c.From}, + deps: make(map[*dispatchState]struct{}), + unregistered: true, + } + } + } else { + if index < 0 || index >= len(allDispatchStates) { + return command{}, errors.Errorf("invalid stage index %d", index) + } + stn = allDispatchStates[index] + } + cmd.sources = []*dispatchState{stn} + } + } + + if ok := detectRunMount(&cmd, dispatchStatesByName, allDispatchStates); ok { + return cmd, nil + } + + return cmd, nil +} + +type dispatchOpt struct { + allDispatchStates []*dispatchState + dispatchStatesByName map[string]*dispatchState + metaArgs []instructions.ArgCommand + buildArgValues map[string]string + shlex *shell.Lex + sessionID string + buildContext llb.State + proxyEnv *llb.ProxyEnv + cacheIDNamespace string +} + +func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { + if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok { + err := ex.Expand(func(word string) (string, error) { + return opt.shlex.ProcessWord(word, toEnvList(d.buildArgs, d.image.Config.Env)) + }) + if err != nil { + return err + } + } + + var err error + switch c := cmd.Command.(type) { + case *instructions.MaintainerCommand: + err = dispatchMaintainer(d, c) + case *instructions.EnvCommand: + err = dispatchEnv(d, c, true) + case *instructions.RunCommand: + err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt) + case *instructions.WorkdirCommand: + err = dispatchWorkdir(d, c, true) + case *instructions.AddCommand: + err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "") + if err == nil { + for _, src := range c.Sources() { + d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} + } + } + case *instructions.LabelCommand: + err = dispatchLabel(d, c) + case *instructions.OnbuildCommand: + err = dispatchOnbuild(d, c) + case *instructions.CmdCommand: + err = dispatchCmd(d, c) + case *instructions.EntrypointCommand: + err = dispatchEntrypoint(d, c) + case *instructions.HealthCheckCommand: + err = dispatchHealthcheck(d, c) + case *instructions.ExposeCommand: + err = dispatchExpose(d, c, opt.shlex) + case *instructions.UserCommand: + err = dispatchUser(d, c, true) + case *instructions.VolumeCommand: + err = dispatchVolume(d, c) + case *instructions.StopSignalCommand: + err = dispatchStopSignal(d, c) + case *instructions.ShellCommand: + err = dispatchShell(d, c) + case *instructions.ArgCommand: + err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues) + case *instructions.CopyCommand: + l := opt.buildContext + if len(cmd.sources) != 0 { + l = cmd.sources[0].state + } + err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown) + if err == nil && len(cmd.sources) == 0 { + for _, src := range c.Sources() { + d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} + } + } + default: + } + return err +} + +type dispatchState struct { + state llb.State + image Image + stage instructions.Stage + base *dispatchState + deps map[*dispatchState]struct{} + buildArgs 
[]instructions.ArgCommand + commands []command + ctxPaths map[string]struct{} + ignoreCache bool + cmdSet bool + unregistered bool +} + +type command struct { + instructions.Command + sources []*dispatchState +} + +func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error { + for _, trigger := range triggers { + ast, err := parser.Parse(strings.NewReader(trigger)) + if err != nil { + return err + } + if len(ast.AST.Children) != 1 { + return errors.New("onbuild trigger should be a single expression") + } + ic, err := instructions.ParseCommand(ast.AST.Children[0]) + if err != nil { + return err + } + cmd, err := toCommand(ic, opt.dispatchStatesByName, opt.allDispatchStates) + if err != nil { + return err + } + if err := dispatch(d, cmd, opt); err != nil { + return err + } + } + return nil +} + +func dispatchEnv(d *dispatchState, c *instructions.EnvCommand, commit bool) error { + commitMessage := bytes.NewBufferString("ENV") + for _, e := range c.Env { + commitMessage.WriteString(" " + e.String()) + d.state = d.state.AddEnv(e.Key, e.Value) + d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value, true) + } + if commit { + return commitToHistory(&d.image, commitMessage.String(), false, nil) + } + return nil +} + +func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } else if d.image.Config.Entrypoint != nil { + args = append(d.image.Config.Entrypoint, args...) + } + opt := []llb.RunOption{llb.Args(args)} + for _, arg := range d.buildArgs { + opt = append(opt, llb.AddEnv(arg.Key, getArgValue(arg))) + } + opt = append(opt, dfCmd(c)) + if d.ignoreCache { + opt = append(opt, llb.IgnoreCache) + } + if proxy != nil { + opt = append(opt, llb.WithProxy(*proxy)) + } + + opt = append(opt, dispatchRunMounts(d, c, sources, dopt)...) + + d.state = d.state.Run(opt...).Root() + return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state) +} + +func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error { + d.state = d.state.Dir(c.Path) + wd := c.Path + if !path.IsAbs(c.Path) { + wd = path.Join("/", d.image.Config.WorkingDir, wd) + } + d.image.Config.WorkingDir = wd + if commit { + return commitToHistory(&d.image, "WORKDIR "+wd, false, nil) + } + return nil +} + +func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint interface{}, chown string) error { + // TODO: this should use CopyOp instead. Current implementation is inefficient + img := llb.Image(CopyImage) + + dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest())) + if c.Dest() == "." 
|| c.Dest()[len(c.Dest())-1] == filepath.Separator { + dest += string(filepath.Separator) + } + args := []string{"copy"} + unpack := isAddCommand + + mounts := make([]llb.RunOption, 0, len(c.Sources())) + if chown != "" { + args = append(args, fmt.Sprintf("--chown=%s", chown)) + _, _, err := parseUser(chown) + if err != nil { + mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) + mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) + } + } + + commitMessage := bytes.NewBufferString("") + if isAddCommand { + commitMessage.WriteString("ADD") + } else { + commitMessage.WriteString("COPY") + } + + for i, src := range c.Sources() { + commitMessage.WriteString(" " + src) + if isAddCommand && (strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://")) { + // Resources from remote URLs are not decompressed. + // https://docs.docker.com/engine/reference/builder/#add + // + // Note: mixing up remote archives and local archives in a single ADD instruction + // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 + unpack = false + u, err := url.Parse(src) + f := "__unnamed__" + if err == nil { + if base := path.Base(u.Path); base != "." && base != "/" { + f = base + } + } + target := path.Join(fmt.Sprintf("/src-%d", i), f) + args = append(args, target) + mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly)) + } else { + d, f := splitWildcards(src) + targetCmd := fmt.Sprintf("/src-%d", i) + targetMount := targetCmd + if f == "" { + f = path.Base(src) + targetMount = path.Join(targetMount, f) + } + targetCmd = path.Join(targetCmd, f) + args = append(args, targetCmd) + mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly)) + } + } + + commitMessage.WriteString(" " + c.Dest()) + + args = append(args, dest) + if unpack { + args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) + } + + opt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint)} + if d.ignoreCache { + opt = append(opt, llb.IgnoreCache) + } + run := img.Run(append(opt, mounts...)...) 
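Because this implementation lowers COPY/ADD to a single exec op on the helper image rather than a native file op, each source becomes a read-only mount at /src-N, the stage rootfs is mounted at /dest, and the copy binary receives the assembled argument list. A rough standalone sketch of that argument assembly, using hypothetical sources; the wildcard split mirrors splitWildcards defined later in this file:

package main

import (
	"fmt"
	"path"
	"strings"
)

// splitWildcards mirrors the helper defined later in convert.go: the mount
// source stops before the first glob character, the glob tail stays in the arg.
func splitWildcards(name string) (string, string) {
	i := 0
	for ; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			break
		}
	}
	if i == len(name) {
		return name, ""
	}
	base := path.Base(name[:i])
	if name[:i] == "" || strings.HasSuffix(name[:i], "/") {
		base = ""
	}
	return path.Dir(name[:i]), base + name[i:]
}

func main() {
	args := []string{"copy"}
	for i, src := range []string{"rootfs/", "bin/docker*"} { // COPY rootfs/ bin/docker* /out/
		dir, f := splitWildcards(src)
		if f == "" {
			f = path.Base(src)
		}
		args = append(args, path.Join(fmt.Sprintf("/src-%d", i), f))
		fmt.Printf("mount context path %q read-only at /src-%d\n", dir, i)
	}
	args = append(args, "/out/")
	fmt.Println(args) // [copy /src-0/rootfs /src-1/docker* /out/]
}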
+ d.state = run.AddMount("/dest", d.state) + + return commitToHistory(&d.image, commitMessage.String(), true, &d.state) +} + +func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { + d.image.Author = c.Maintainer + return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) +} + +func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { + commitMessage := bytes.NewBufferString("LABEL") + if d.image.Config.Labels == nil { + d.image.Config.Labels = make(map[string]string, len(c.Labels)) + } + for _, v := range c.Labels { + d.image.Config.Labels[v.Key] = v.Value + commitMessage.WriteString(" " + v.String()) + } + return commitToHistory(&d.image, commitMessage.String(), false, nil) +} + +func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { + d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression) + return nil +} + +func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } + d.image.Config.Cmd = args + d.image.Config.ArgsEscaped = true + d.cmdSet = true + return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) +} + +func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } + d.image.Config.Entrypoint = args + if !d.cmdSet { + d.image.Config.Cmd = nil + } + return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) +} + +func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { + d.image.Config.Healthcheck = &HealthConfig{ + Test: c.Health.Test, + Interval: c.Health.Interval, + Timeout: c.Health.Timeout, + StartPeriod: c.Health.StartPeriod, + Retries: c.Health.Retries, + } + return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) +} + +func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { + ports := []string{} + for _, p := range c.Ports { + ps, err := shlex.ProcessWords(p, toEnvList(d.buildArgs, d.image.Config.Env)) + if err != nil { + return err + } + ports = append(ports, ps...) 
+ } + c.Ports = ports + + ps, _, err := nat.ParsePortSpecs(c.Ports) + if err != nil { + return err + } + + if d.image.Config.ExposedPorts == nil { + d.image.Config.ExposedPorts = make(map[string]struct{}) + } + for p := range ps { + d.image.Config.ExposedPorts[string(p)] = struct{}{} + } + + return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) +} + +func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { + d.state = d.state.User(c.User) + d.image.Config.User = c.User + if commit { + return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) + } + return nil +} + +func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { + if d.image.Config.Volumes == nil { + d.image.Config.Volumes = map[string]struct{}{} + } + for _, v := range c.Volumes { + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + d.image.Config.Volumes[v] = struct{}{} + } + return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) +} + +func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { + if _, err := signal.ParseSignal(c.Signal); err != nil { + return err + } + d.image.Config.StopSignal = c.Signal + return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) +} + +func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { + d.image.Config.Shell = c.Shell + return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) +} + +func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.ArgCommand, buildArgValues map[string]string) error { + commitStr := "ARG " + c.Key + if c.Value != nil { + commitStr += "=" + *c.Value + } + if c.Value == nil { + for _, ma := range metaArgs { + if ma.Key == c.Key { + c.Value = ma.Value + } + } + } + + d.buildArgs = append(d.buildArgs, setBuildArgValue(*c, buildArgValues)) + return commitToHistory(&d.image, commitStr, false, nil) +} + +func pathRelativeToWorkingDir(s llb.State, p string) string { + if path.IsAbs(p) { + return p + } + return path.Join(s.GetDir(), p) +} + +func splitWildcards(name string) (string, string) { + i := 0 + for ; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + break + } + } + if i == len(name) { + return name, "" + } + + base := path.Base(name[:i]) + if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { + base = "" + } + return path.Dir(name[:i]), base + name[i:] +} + +func addEnv(env []string, k, v string, override bool) []string { + gotOne := false + for i, envVar := range env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if shell.EqualEnvKeys(compareFrom, k) { + if override { + env[i] = k + "=" + v + } + gotOne = true + break + } + } + if !gotOne { + env = append(env, k+"="+v) + } + return env +} + +func setBuildArgValue(c instructions.ArgCommand, values map[string]string) instructions.ArgCommand { + if v, ok := values[c.Key]; ok { + c.Value = &v + } + return c +} + +func toEnvList(args []instructions.ArgCommand, env []string) []string { + for _, arg := range args { + env = addEnv(env, arg.Key, getArgValue(arg), false) + } + return env +} + +func getArgValue(arg instructions.ArgCommand) string { + v := "" + if arg.Value != nil { + v = *arg.Value + } + return v +} + +func dfCmd(cmd interface{}) llb.MetadataOpt { + // TODO: add fmt.Stringer to instructions.Command to remove interface{} + var cmdStr string + if cmd, ok := cmd.(fmt.Stringer); ok { + cmdStr = cmd.String() + } + if cmd, ok := cmd.(string); ok { + cmdStr = cmd + } + return llb.WithDescription(map[string]string{ + "com.docker.dockerfile.v1.command": cmdStr, + }) +} + +func runCommandString(args []string, buildArgs []instructions.ArgCommand) string { + var tmpBuildEnv []string + for _, arg := range buildArgs { + tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+getArgValue(arg)) + } + if len(tmpBuildEnv) > 0 { + tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) 
+ } + + return strings.Join(append(tmpBuildEnv, args...), " ") +} + +func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { + if st != nil { + msg += " # buildkit" + } + + img.History = append(img.History, ocispec.History{ + CreatedBy: msg, + Comment: historyComment, + EmptyLayer: !withLayer, + }) + return nil +} + +func isReachable(from, to *dispatchState) (ret bool) { + if from == nil { + return false + } + if from == to || isReachable(from.base, to) { + return true + } + for d := range from.deps { + if isReachable(d, to) { + return true + } + } + return false +} + +func parseUser(str string) (uid uint32, gid uint32, err error) { + if str == "" { + return 0, 0, nil + } + parts := strings.SplitN(str, ":", 2) + for i, v := range parts { + switch i { + case 0: + uid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + if len(parts) == 1 { + gid = uid + } + case 1: + gid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + } + } + return +} + +func parseUID(str string) (uint32, error) { + if str == "root" { + return 0, nil + } + uid, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(uid), nil +} + +func normalizeContextPaths(paths map[string]struct{}) []string { + pathSlice := make([]string, 0, len(paths)) + for p := range paths { + if p == "/" { + return nil + } + pathSlice = append(pathSlice, p) + } + + toDelete := map[string]struct{}{} + for i := range pathSlice { + for j := range pathSlice { + if i == j { + continue + } + if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") { + delete(paths, pathSlice[j]) + } + } + } + + toSort := make([]string, 0, len(paths)) + for p := range paths { + if _, ok := toDelete[p]; !ok { + toSort = append(toSort, path.Join(".", p)) + } + } + sort.Slice(toSort, func(i, j int) bool { + return toSort[i] < toSort[j] + }) + return toSort +} + +func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { + pe := &llb.ProxyEnv{} + isNil := true + for k, v := range args { + if strings.EqualFold(k, "http_proxy") { + pe.HttpProxy = v + isNil = false + } + if strings.EqualFold(k, "https_proxy") { + pe.HttpsProxy = v + isNil = false + } + if strings.EqualFold(k, "ftp_proxy") { + pe.FtpProxy = v + isNil = false + } + if strings.EqualFold(k, "no_proxy") { + pe.NoProxy = v + isNil = false + } + } + if isNil { + return nil + } + return pe +} + +type mutableOutput struct { + llb.Output +} + +func withShell(img Image, args []string) []string { + var shell []string + if len(img.Config.Shell) > 0 { + shell = append([]string{}, img.Config.Shell...) 
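The withShell helper being defined here wraps a shell-form RUN/CMD/ENTRYPOINT line in the image's configured SHELL, falling back to the platform default provided by defaultshell_unix.go / defaultshell_windows.go below. A compact standalone sketch of the wrapping:

package main

import (
	"fmt"
	"strings"
)

func withShell(configShell, args []string) []string {
	shell := append([]string{}, configShell...)
	if len(shell) == 0 {
		shell = []string{"/bin/sh", "-c"} // Linux default; Windows uses cmd /S /C
	}
	return append(shell, strings.Join(args, " "))
}

func main() {
	fmt.Println(withShell(nil, []string{"apt-get update"}))
	// [/bin/sh -c apt-get update]
}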
+ } else { + shell = defaultShell() + } + return append(shell, strings.Join(args, " ")) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go new file mode 100644 index 0000000000..b358ee68f3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go @@ -0,0 +1,16 @@ +// +build !dfrunmount,!dfextall + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func detectRunMount(cmd *command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) bool { + return false +} + +func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) []llb.RunOption { + return nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go new file mode 100644 index 0000000000..f1b20293f4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -0,0 +1,74 @@ +// +build dfrunmount dfextall + +package dockerfile2llb + +import ( + "path" + "path/filepath" + "strings" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func detectRunMount(cmd *command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) bool { + if c, ok := cmd.Command.(*instructions.RunCommand); ok { + mounts := instructions.GetMounts(c) + sources := make([]*dispatchState, len(mounts)) + for i, mount := range mounts { + if mount.From == "" && mount.Type == instructions.MountTypeCache { + mount.From = emptyImageName + } + from := mount.From + if from == "" || mount.Type == instructions.MountTypeTmpfs { + continue + } + stn, ok := dispatchStatesByName[strings.ToLower(from)] + if !ok { + stn = &dispatchState{ + stage: instructions.Stage{BaseName: from}, + deps: make(map[*dispatchState]struct{}), + unregistered: true, + } + } + sources[i] = stn + } + cmd.sources = sources + return true + } + + return false +} + +func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) []llb.RunOption { + var out []llb.RunOption + mounts := instructions.GetMounts(c) + + for i, mount := range mounts { + if mount.From == "" && mount.Type == instructions.MountTypeCache { + mount.From = emptyImageName + } + st := opt.buildContext + if mount.From != "" { + st = sources[i].state + } + var mountOpts []llb.MountOption + if mount.Type == instructions.MountTypeTmpfs { + st = llb.Scratch() + mountOpts = append(mountOpts, llb.Tmpfs()) + } + if mount.ReadOnly { + mountOpts = append(mountOpts, llb.Readonly) + } + if mount.Type == instructions.MountTypeCache { + mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID)) + } + if src := path.Join("/", mount.Source); src != "/" { + mountOpts = append(mountOpts, llb.SourcePath(src)) + } + out = append(out, llb.AddMount(path.Join("/", mount.Target), st, mountOpts...)) + + d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{} + } + return out +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go new file mode 100644 index 0000000000..b5d541d1f5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile2llb + +func defaultShell() []string { + return []string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go new file mode 100644 index 0000000000..7693e05086 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package dockerfile2llb + +func defaultShell() []string { + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go new file mode 100644 index 0000000000..cf06b5ad85 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go @@ -0,0 +1,38 @@ +package dockerfile2llb + +import ( + "bufio" + "io" + "regexp" + "strings" +) + +const keySyntax = "syntax" + +var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) + +func DetectSyntax(r io.Reader) (string, string, bool) { + directives := ParseDirectives(r) + if len(directives) == 0 { + return "", "", false + } + v, ok := directives[keySyntax] + if !ok { + return "", "", false + } + p := strings.SplitN(v, " ", 2) + return p[0], v, true +} + +func ParseDirectives(r io.Reader) map[string]string { + m := map[string]string{} + s := bufio.NewScanner(r) + for s.Scan() { + match := reDirective.FindStringSubmatch(s.Text()) + if len(match) == 0 { + return m + } + m[strings.ToLower(match[1])] = match[2] + } + return m +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go new file mode 100644 index 0000000000..59c464a63d --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -0,0 +1,75 @@ +package dockerfile2llb + +import ( + "runtime" + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/moby/buildkit/util/system" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. 
+ Retries int `json:",omitempty"` +} + +type ImageConfig struct { + ocispec.ImageConfig + + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + + // NetworkDisabled bool `json:",omitempty"` // Is network disabled + // MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + ocispec.Image + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` +} + +func clone(src Image) Image { + img := src + img.Config = src.Config + img.Config.Env = append([]string{}, src.Config.Env...) + img.Config.Cmd = append([]string{}, src.Config.Cmd...) + img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...) + return img +} + +func emptyImage() Image { + img := Image{ + Image: ocispec.Image{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + } + img.RootFS.Type = "layers" + img.Config.WorkingDir = "/" + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + return img +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go new file mode 100644 index 0000000000..aee1f026ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go @@ -0,0 +1,86 @@ +package dockerfile + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" +) + +func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*bridgeClient, error) { + return &bridgeClient{opts: opts, FrontendLLBBridge: llbBridge, sid: session.FromContext(ctx)}, nil +} + +type bridgeClient struct { + frontend.FrontendLLBBridge + opts map[string]string + final *ref + sid string + exporterAttr map[string][]byte + refs []*ref +} + +func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest, exporterAttr map[string][]byte, final bool) (client.Reference, error) { + r, exporterAttrRes, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ + Definition: req.Definition, + Frontend: req.Frontend, + FrontendOpt: req.FrontendOpt, + ImportCacheRefs: req.ImportCacheRefs, + }) + if err != nil { + return nil, err + } + rr := &ref{r} + c.refs = append(c.refs, rr) + if final { + c.final = rr + if exporterAttr == nil { + exporterAttr = make(map[string][]byte) + } + for k, v := range exporterAttrRes { + exporterAttr[k] = v + } + c.exporterAttr = exporterAttr + } + return rr, nil +} +func (c *bridgeClient) Opts() map[string]string { + return c.opts +} +func (c *bridgeClient) SessionID() string { + return c.sid +} + +type ref struct { + solver.CachedResult +} + +func (r *ref) ReadFile(ctx context.Context, req 
client.ReadRequest) ([]byte, error) { + ref, err := r.getImmutableRef() + if err != nil { + return nil, err + } + newReq := cache.ReadRequest{ + Filename: req.Filename, + } + if r := req.Range; r != nil { + newReq.Range = &cache.FileRange{ + Offset: r.Offset, + Length: r.Length, + } + } + return cache.ReadFile(ctx, ref, newReq) +} + +func (r *ref) getImmutableRef() (cache.ImmutableRef, error) { + ref, ok := r.CachedResult.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys()) + } + return ref.ImmutableRef, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index 7a81e3c136..e299d52323 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -11,6 +11,7 @@ type FlagType int const ( boolType FlagType = iota stringType + stringsType ) // BFlags contains all flags information for the builder @@ -23,10 +24,11 @@ type BFlags struct { // Flag contains all information for a flag type Flag struct { - bf *BFlags - name string - flagType FlagType - Value string + bf *BFlags + name string + flagType FlagType + Value string + StringValues []string } // NewBFlags returns the new BFlags struct @@ -70,6 +72,15 @@ func (bf *BFlags) AddString(name string, def string) *Flag { return flag } +// AddStrings adds a string flag to BFlags that can be specified multiple times +func (bf *BFlags) AddStrings(name string) *Flag { + flag := bf.addFlag(name, stringsType) + if flag == nil { + return nil + } + return flag +} + // addFlag is a generic func used by the other AddXXX() func // to add a new flag to the BFlags struct. // Note, any error will be generated when Parse() is called (see Parse). @@ -145,7 +156,7 @@ func (bf *BFlags) Parse() error { return fmt.Errorf("Unknown flag: %s", arg) } - if _, ok = bf.used[arg]; ok { + if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { return fmt.Errorf("Duplicate flag specified: %s", arg) } @@ -173,6 +184,12 @@ func (bf *BFlags) Parse() error { } flag.Value = value + case stringsType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.StringValues = append(flag.StringValues, value) + default: panic("No idea what kind of flag we have! 
Should never get here!") } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go index 12478463cd..3e4617ac91 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -233,6 +233,7 @@ type ShellDependantCmdLine struct { // type RunCommand struct { withNameAndCode + withExternalData ShellDependantCmdLine } @@ -416,3 +417,18 @@ func HasStage(s []Stage, name string) (int, bool) { } return -1, false } + +type withExternalData struct { + m map[interface{}]interface{} +} + +func (c *withExternalData) getExternalValue(k interface{}) interface{} { + return c.m[k] +} + +func (c *withExternalData) setExternalValue(k, v interface{}) { + if c.m == nil { + c.m = map[interface{}]interface{}{} + } + c.m[k] = v +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go new file mode 100644 index 0000000000..71030fa72b --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -0,0 +1,161 @@ +// +build dfrunmount dfextall + +package instructions + +import ( + "encoding/csv" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const MountTypeBind = "bind" +const MountTypeCache = "cache" +const MountTypeTmpfs = "tmpfs" + +var allowedMountTypes = map[string]struct{}{ + MountTypeBind: {}, + MountTypeCache: {}, + MountTypeTmpfs: {}, +} + +type mountsKeyT string + +var mountsKey = mountsKeyT("dockerfile/run/mounts") + +func init() { + parseRunPreHooks = append(parseRunPreHooks, runMountPreHook) + parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) +} + +func isValidMountType(s string) bool { + _, ok := allowedMountTypes[s] + return ok +} + +func runMountPreHook(cmd *RunCommand, req parseRequest) error { + st := &mountState{} + st.flag = req.flags.AddStrings("mount") + cmd.setExternalValue(mountsKey, st) + return nil +} + +func runMountPostHook(cmd *RunCommand, req parseRequest) error { + st := getMountState(cmd) + if st == nil { + return errors.Errorf("no mount state") + } + var mounts []*Mount + for _, str := range st.flag.StringValues { + m, err := parseMount(str) + if err != nil { + return err + } + mounts = append(mounts, m) + } + st.mounts = mounts + return nil +} + +func getMountState(cmd *RunCommand) *mountState { + v := cmd.getExternalValue(mountsKey) + if v == nil { + return nil + } + return v.(*mountState) +} + +func GetMounts(cmd *RunCommand) []*Mount { + return getMountState(cmd).mounts +} + +type mountState struct { + flag *Flag + mounts []*Mount +} + +type Mount struct { + Type string + From string + Source string + Target string + ReadOnly bool + CacheID string +} + +func parseMount(value string) (*Mount, error) { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return nil, errors.Wrap(err, "failed to parse csv mounts") + } + + m := &Mount{Type: MountTypeBind} + + roAuto := true + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) == 1 { + switch key { + case "readonly", "ro": + m.ReadOnly = true + roAuto = false + continue + case "readwrite", "rw": + m.ReadOnly = false + roAuto = false + continue + } + } + + if len(parts) != 2 { + return 
nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + if !isValidMountType(strings.ToLower(value)) { + return nil, errors.Errorf("invalid mount type %q", value) + } + m.Type = strings.ToLower(value) + case "from": + m.From = value + case "source", "src": + m.Source = value + case "target", "dst", "destination": + m.Target = value + case "readonly", "ro": + m.ReadOnly, err = strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + roAuto = false + case "readwrite", "rw": + rw, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + m.ReadOnly = !rw + roAuto = false + case "id": + m.CacheID = value + default: + return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if roAuto { + if m.Type == MountTypeCache { + m.ReadOnly = false + } else { + m.ReadOnly = true + } + } + + return m, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 1285ac3306..8c8199e1f7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -24,6 +24,9 @@ type parseRequest struct { original string } +var parseRunPreHooks []func(*RunCommand, parseRequest) error +var parseRunPostHooks []func(*RunCommand, parseRequest) error + func nodeArgs(node *parser.Node) []string { result := []string{} for ; node.Next != nil; node = node.Next { @@ -355,15 +358,28 @@ func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependan } func parseRun(req parseRequest) (*RunCommand, error) { + cmd := &RunCommand{} + + for _, fn := range parseRunPreHooks { + if err := fn(cmd, req); err != nil { + return nil, err + } + } if err := req.flags.Parse(); err != nil { return nil, err } - return &RunCommand{ - ShellDependantCmdLine: parseShellDependentCommand(req, false), - withNameAndCode: newWithNameAndCode(req), - }, nil + cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false) + cmd.withNameAndCode = newWithNameAndCode(req) + + for _, fn := range parseRunPostHooks { + if err := fn(cmd, req); err != nil { + return nil, err + } + } + + return cmd, nil } func parseCmd(req parseRequest) (*CmdCommand, error) { diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go new file mode 100644 index 0000000000..f522f59964 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -0,0 +1,29 @@ +package frontend + +import ( + "context" + "io" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +type Frontend interface { + Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (solver.CachedResult, map[string][]byte, error) +} + +type FrontendLLBBridge interface { + Solve(ctx context.Context, req SolveRequest) (solver.CachedResult, map[string][]byte, error) + ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) + Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error +} + +type SolveRequest struct { + Definition *pb.Definition 
+ Frontend string + FrontendOpt map[string]string + ImportCacheRefs []string +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go new file mode 100644 index 0000000000..784925a0b7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +// TODO: make this take same options as LLBBridge. Add Return() +type Client interface { + Solve(ctx context.Context, req SolveRequest, exporterAttr map[string][]byte, final bool) (Reference, error) + ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) + Opts() map[string]string + SessionID() string +} + +type Reference interface { + ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) + // StatFile(ctx context.Context, req StatRequest) (*StatResponse, error) + // ReadDir(ctx context.Context, req ReadDirRequest) ([]*StatResponse, error) +} + +type ReadRequest struct { + Filename string + Range *FileRange +} + +type FileRange struct { + Offset int + Length int +} + +// SolveRequest is same as frontend.SolveRequest but avoiding dependency +type SolveRequest struct { + Definition *pb.Definition + Frontend string + FrontendOpt map[string]string + ImportCacheRefs []string +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go new file mode 100644 index 0000000000..36726206de --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -0,0 +1,348 @@ +package gateway + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "os" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/frontend" + pb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/tracing" + "github.com/moby/buildkit/worker" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + keySource = "source" + keyDevel = "gateway-devel" + exporterImageConfig = "containerimage.config" +) + +func NewGatewayFrontend() frontend.Frontend { + return &gatewayFrontend{} +} + +type gatewayFrontend struct { +} + +func filterPrefix(opts map[string]string, pfx string) map[string]string { + m := map[string]string{} + for k, v := range opts { + if strings.HasPrefix(k, pfx) { + m[strings.TrimPrefix(k, pfx)] = v + } + } + return m +} + +func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) { + source, ok := opts[keySource] + if !ok { + return nil, nil, errors.Errorf("no source specified for gateway") + } + + sid := session.FromContext(ctx) + + _, isDevel := opts[keyDevel] + var img ocispec.Image + var rootFS cache.ImmutableRef + var readonly bool // TODO: try to switch to read-only by default. 
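In gateway-devel mode the frontend itself is first built through the bridge, and filterPrefix (above) re-scopes the outer build options for that inner build by stripping the gateway- prefix. A standalone illustration with hypothetical option names:

package main

import (
	"fmt"
	"strings"
)

// filterPrefix is copied from gateway.go above.
func filterPrefix(opts map[string]string, pfx string) map[string]string {
	m := map[string]string{}
	for k, v := range opts {
		if strings.HasPrefix(k, pfx) {
			m[strings.TrimPrefix(k, pfx)] = v
		}
	}
	return m
}

func main() {
	opts := map[string]string{
		"source":          "docker/dockerfile", // consumed by the gateway itself
		"gateway-devel":   "true",
		"gateway-context": "https://example.com/app.git", // hypothetical inner option
	}
	fmt.Println(filterPrefix(opts, "gateway-"))
	// map[context:https://example.com/app.git devel:true]
}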
+ + if isDevel { + ref, exp, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid), + frontend.SolveRequest{ + Frontend: source, + FrontendOpt: filterPrefix(opts, "gateway-"), + }) + if err != nil { + return nil, nil, err + } + defer ref.Release(context.TODO()) + + workerRef, ok := ref.Sys().(*worker.WorkerRef) + if !ok { + return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys()) + } + rootFS = workerRef.ImmutableRef + config, ok := exp[exporterImageConfig] + if ok { + if err := json.Unmarshal(config, &img); err != nil { + return nil, nil, err + } + } + } else { + sourceRef, err := reference.ParseNormalizedNamed(source) + if err != nil { + return nil, nil, err + } + + dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String()) + if err != nil { + return nil, nil, err + } + + if err := json.Unmarshal(config, &img); err != nil { + return nil, nil, err + } + + sourceRef, err = reference.WithDigest(sourceRef, dgst) + if err != nil { + return nil, nil, err + } + + src := llb.Image(sourceRef.String()) + + def, err := src.Marshal() + if err != nil { + return nil, nil, err + } + + ref, _, err := llbBridge.Solve(ctx, frontend.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, nil, err + } + defer ref.Release(context.TODO()) + workerRef, ok := ref.Sys().(*worker.WorkerRef) + if !ok { + return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys()) + } + rootFS = workerRef.ImmutableRef + } + + lbf, err := newLLBBridgeForwarder(ctx, llbBridge) + defer lbf.conn.Close() + if err != nil { + return nil, nil, err + } + + args := []string{"/run"} + env := []string{} + cwd := "/" + if img.Config.Env != nil { + env = img.Config.Env + } + if img.Config.Entrypoint != nil { + args = img.Config.Entrypoint + } + if img.Config.WorkingDir != "" { + cwd = img.Config.WorkingDir + } + i := 0 + for k, v := range opts { + env = append(env, fmt.Sprintf("BUILDKIT_FRONTEND_OPT_%d", i)+"="+k+"="+v) + i++ + } + + env = append(env, "BUILDKIT_SESSION_ID="+sid) + + defer func() { + for _, r := range lbf.refs { + if r != nil && (lbf.lastRef != r || retErr != nil) { + r.Release(context.TODO()) + } + } + }() + + err = llbBridge.Exec(ctx, executor.Meta{ + Env: env, + Args: args, + Cwd: cwd, + ReadonlyRootFS: readonly, + }, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr) + + if err != nil { + return nil, nil, err + } + + return lbf.lastRef, lbf.exporterAttr, nil +} + +func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge) (*llbBridgeForwarder, error) { + lbf := &llbBridgeForwarder{ + callCtx: ctx, + llbBridge: llbBridge, + refs: map[string]solver.Result{}, + pipe: newPipe(), + } + + server := grpc.NewServer() + grpc_health_v1.RegisterHealthServer(server, health.NewServer()) + pb.RegisterLLBBridgeServer(server, lbf) + + go serve(ctx, server, lbf.conn) + + return lbf, nil +} + +type pipe struct { + Stdin io.ReadCloser + Stdout io.WriteCloser + conn net.Conn +} + +func newPipe() *pipe { + pr1, pw1, _ := os.Pipe() + pr2, pw2, _ := os.Pipe() + return &pipe{ + Stdin: pr1, + Stdout: pw2, + conn: &conn{ + Reader: pr2, + Writer: pw1, + Closer: pw2, + }, + } +} + +type conn struct { + io.Reader + io.Writer + io.Closer +} + +func (s *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) SetDeadline(t time.Time) error { + return nil +} +func (s *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (s *conn) SetWriteDeadline(t time.Time) error { + return nil 
+} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "pipe" +} + +func (d dummyAddr) String() string { + return "localhost" +} + +type llbBridgeForwarder struct { + callCtx context.Context + llbBridge frontend.FrontendLLBBridge + refs map[string]solver.Result + lastRef solver.CachedResult + exporterAttr map[string][]byte + *pipe +} + +func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref) + if err != nil { + return nil, err + } + return &pb.ResolveImageConfigResponse{ + Digest: dgst, + Config: dt, + }, nil +} + +func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + ref, expResp, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{ + Definition: req.Definition, + Frontend: req.Frontend, + FrontendOpt: req.FrontendOpt, + ImportCacheRefs: req.ImportCacheRefs, + }) + if err != nil { + return nil, err + } + + exp := map[string][]byte{} + if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil { + return nil, err + } + + if expResp != nil { + for k, v := range expResp { + exp[k] = v + } + } + + id := identity.NewID() + lbf.refs[id] = ref + if req.Final { + lbf.lastRef = ref + lbf.exporterAttr = exp + } + if ref == nil { + id = "" + } + return &pb.SolveResponse{Ref: id}, nil +} +func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + ref, ok := lbf.refs[req.Ref] + if !ok { + return nil, errors.Errorf("no such ref: %v", req.Ref) + } + if ref == nil { + return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath) + } + workerRef, ok := ref.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", ref.Sys()) + } + + newReq := cache.ReadRequest{ + Filename: req.FilePath, + } + if r := req.Range; r != nil { + newReq.Range = &cache.FileRange{ + Offset: int(r.Offset), + Length: int(r.Length), + } + } + + dt, err := cache.ReadFile(ctx, workerRef.ImmutableRef, newReq) + if err != nil { + return nil, err + } + + return &pb.ReadFileResponse{Data: dt}, nil +} + +func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { + return &pb.PongResponse{}, nil +} + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go new file mode 100644 index 0000000000..eb20999563 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -0,0 +1,2042 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gateway.proto + +/* + Package moby_buildkit_v1_frontend is a generated protocol buffer package. 
+ + It is generated from these files: + gateway.proto + + It has these top-level messages: + ResolveImageConfigRequest + ResolveImageConfigResponse + SolveRequest + SolveResponse + ReadFileRequest + FileRange + ReadFileResponse + PingRequest + PongResponse +*/ +package moby_buildkit_v1_frontend + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import pb "github.com/moby/buildkit/solver/pb" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ResolveImageConfigRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` +} + +func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } +func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveImageConfigRequest) ProtoMessage() {} +func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{0} } + +func (m *ResolveImageConfigRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +type ResolveImageConfigResponse struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` + Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` +} + +func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigResponse{} } +func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } +func (*ResolveImageConfigResponse) ProtoMessage() {} +func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptorGateway, []int{1} +} + +func (m *ResolveImageConfigResponse) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +type SolveRequest struct { + Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition" json:"Definition,omitempty"` + Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ImportCacheRefs []string `protobuf:"bytes,4,rep,name=ImportCacheRefs" json:"ImportCacheRefs,omitempty"` + Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` + ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` +} + +func (m *SolveRequest) Reset() { *m = SolveRequest{} } +func (m *SolveRequest) String() string { return proto.CompactTextString(m) } +func (*SolveRequest) ProtoMessage() {} +func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{2} } + +func (m *SolveRequest) GetDefinition() *pb.Definition { + if m != nil { + return m.Definition + } + return nil +} + +func (m 
*SolveRequest) GetFrontend() string { + if m != nil { + return m.Frontend + } + return "" +} + +func (m *SolveRequest) GetFrontendOpt() map[string]string { + if m != nil { + return m.FrontendOpt + } + return nil +} + +func (m *SolveRequest) GetImportCacheRefs() []string { + if m != nil { + return m.ImportCacheRefs + } + return nil +} + +func (m *SolveRequest) GetFinal() bool { + if m != nil { + return m.Final + } + return false +} + +func (m *SolveRequest) GetExporterAttr() []byte { + if m != nil { + return m.ExporterAttr + } + return nil +} + +type SolveResponse struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + ExporterAttr []byte `protobuf:"bytes,2,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` +} + +func (m *SolveResponse) Reset() { *m = SolveResponse{} } +func (m *SolveResponse) String() string { return proto.CompactTextString(m) } +func (*SolveResponse) ProtoMessage() {} +func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{3} } + +func (m *SolveResponse) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *SolveResponse) GetExporterAttr() []byte { + if m != nil { + return m.ExporterAttr + } + return nil +} + +type ReadFileRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + FilePath string `protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` + Range *FileRange `protobuf:"bytes,3,opt,name=Range" json:"Range,omitempty"` +} + +func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } +func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } +func (*ReadFileRequest) ProtoMessage() {} +func (*ReadFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{4} } + +func (m *ReadFileRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *ReadFileRequest) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *ReadFileRequest) GetRange() *FileRange { + if m != nil { + return m.Range + } + return nil +} + +type FileRange struct { + Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` + Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` +} + +func (m *FileRange) Reset() { *m = FileRange{} } +func (m *FileRange) String() string { return proto.CompactTextString(m) } +func (*FileRange) ProtoMessage() {} +func (*FileRange) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{5} } + +func (m *FileRange) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *FileRange) GetLength() int64 { + if m != nil { + return m.Length + } + return 0 +} + +type ReadFileResponse struct { + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } +func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } +func (*ReadFileResponse) ProtoMessage() {} +func (*ReadFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{6} } + +func (m *ReadFileResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type PingRequest struct { +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, 
[]int{7} } + +type PongResponse struct { +} + +func (m *PongResponse) Reset() { *m = PongResponse{} } +func (m *PongResponse) String() string { return proto.CompactTextString(m) } +func (*PongResponse) ProtoMessage() {} +func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{8} } + +func init() { + proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest") + proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse") + proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest") + proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange") + proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse") + proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") + proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for LLBBridge service + +type LLBBridgeClient interface { + ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) +} + +type lLBBridgeClient struct { + cc *grpc.ClientConn +} + +func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient { + return &lLBBridgeClient{cc} +} + +func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) { + out := new(ResolveImageConfigResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { + out := new(ReadFileResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { + out := new(PongResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LLBBridge service + +type LLBBridgeServer interface { + ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) + Ping(context.Context, *PingRequest) (*PongResponse, error) +} + +func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { + s.RegisterService(&_LLBBridge_serviceDesc, srv) +} + +func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveImageConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ReadFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LLBBridge_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.frontend.LLBBridge", + HandlerType: (*LLBBridgeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ResolveImageConfig", + Handler: _LLBBridge_ResolveImageConfig_Handler, + }, + { + MethodName: "Solve", + Handler: _LLBBridge_Solve_Handler, + }, + { + MethodName: "ReadFile", + Handler: 
_LLBBridge_ReadFile_Handler, + }, + { + MethodName: "Ping", + Handler: _LLBBridge_Ping_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gateway.proto", +} + +func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + return i, nil +} + +func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if len(m.Config) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) + i += copy(dAtA[i:], m.Config) + } + return i, nil +} + +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Definition != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(m.Definition.Size())) + n1, err := m.Definition.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Frontend) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) + i += copy(dAtA[i:], m.Frontend) + } + if len(m.FrontendOpt) > 0 { + for k, _ := range m.FrontendOpt { + dAtA[i] = 0x1a + i++ + v := m.FrontendOpt[k] + mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + i = encodeVarintGateway(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ImportCacheRefs) > 0 { + for _, s := range m.ImportCacheRefs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Final { + dAtA[i] = 0x50 + i++ + if m.Final { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.ExporterAttr) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) + i += copy(dAtA[i:], m.ExporterAttr) + } + return i, nil +} + +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if len(m.ExporterAttr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) + i += copy(dAtA[i:], 
m.ExporterAttr) + } + return i, nil +} + +func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if len(m.FilePath) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) + i += copy(dAtA[i:], m.FilePath) + } + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGateway(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *FileRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Offset != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) + } + if m.Length != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintGateway(dAtA, i, uint64(m.Length)) + } + return i, nil +} + +func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *PongResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResolveImageConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *ResolveImageConfigResponse) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + var l int + _ = l + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.FrontendOpt) > 0 { + for k, v := range m.FrontendOpt { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + 
sovGateway(uint64(mapEntrySize)) + } + } + if len(m.ImportCacheRefs) > 0 { + for _, s := range m.ImportCacheRefs { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.Final { + n += 2 + } + l = len(m.ExporterAttr) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *SolveResponse) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.ExporterAttr) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *ReadFileRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.FilePath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *FileRange) Size() (n int) { + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovGateway(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovGateway(uint64(m.Length)) + } + return n +} + +func (m *ReadFileResponse) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + return n +} + +func (m *PingRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *PongResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovGateway(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGateway(x uint64) (n int) { + return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config[:0], dAtA[iNdEx:postIndex]...) + if m.Config == nil { + m.Config = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendOpt == nil { + m.FrontendOpt = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendOpt[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportCacheRefs = append(m.ImportCacheRefs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Final = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) + if m.ExporterAttr == nil { + m.ExporterAttr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ExporterAttr == nil { + m.ExporterAttr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFileRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &FileRange{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFileResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PongResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGateway(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGateway + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGateway(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGateway = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGateway = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("gateway.proto", fileDescriptorGateway) } + +var fileDescriptorGateway = []byte{ + // 629 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x4f, 0xdb, 0x40, + 0x10, 0xad, 0x63, 0x40, 0x64, 0x12, 0x3e, 0xb4, 0xaa, 0x2a, 0xe3, 0x03, 0x44, 0x56, 0x45, 0x2d, + 0x5a, 0x6c, 0x35, 0x6d, 0x25, 0x44, 0xa5, 0x4a, 0x0d, 0x1f, 0x12, 0x15, 0x12, 0x68, 0x7b, 0xa8, + 0xc4, 0xcd, 0x4e, 0xc6, 0x66, 0x45, 0xb2, 0xeb, 0xda, 0x1b, 0xda, 0xa8, 0x97, 0xf6, 0xe7, 0xf4, + 0x9f, 0x70, 0xec, 0x99, 0x03, 0xaa, 0xf8, 0x25, 0x95, 0xd7, 0xeb, 0x60, 0x48, 0x49, 0xe9, 0x6d, + 0xdf, 0x78, 0xe6, 0xed, 0x9b, 0x79, 0xb3, 0x86, 0x85, 0x38, 0x90, 0xf8, 0x25, 0x18, 0x79, 0x49, + 0x2a, 0xa4, 0x20, 0x2b, 0x03, 0x11, 0x8e, 0xbc, 0x70, 0xc8, 0xfa, 0xbd, 0x33, 0x26, 0xbd, 0xf3, + 0x97, 0x5e, 0x94, 0x0a, 0x2e, 0x91, 0xf7, 0xec, 0xcd, 0x98, 0xc9, 0xd3, 0x61, 0xe8, 0x75, 0xc5, + 0xc0, 0x8f, 0x45, 0x2c, 0x7c, 0x55, 0x11, 0x0e, 0x23, 0x85, 0x14, 0x50, 0xa7, 0x82, 0xc9, 0x7e, + 0x51, 0x49, 0xcf, 0x49, 0xfd, 0x92, 0xd4, 0xcf, 0x44, 0xff, 0x1c, 0x53, 0x3f, 0x09, 0x7d, 0x91, + 0x64, 0x45, 0xb6, 0xb3, 0x09, 0x2b, 0x14, 0xd5, 0x87, 0x83, 0x41, 0x10, 0xe3, 0x8e, 0xe0, 0x11, + 0x8b, 0x29, 0x7e, 0x1e, 0x62, 0x26, 0xc9, 0x32, 0x98, 0x14, 0x23, 0xcb, 0x68, 0x19, 0x6e, 0x9d, + 0xe6, 0x47, 0xe7, 0xbb, 0x01, 0xf6, 0xdf, 0xf2, 0xb3, 0x44, 0xf0, 0x0c, 0xc9, 0x07, 0x98, 0xdb, + 0x65, 0x31, 0x66, 0xb2, 0xa8, 0xe9, 0xb4, 0x2f, 0xae, 0xd6, 0x1e, 0x5d, 0x5e, 0xad, 0x6d, 0x54, + 0x34, 0x89, 0x04, 0x79, 0x57, 0x70, 0x19, 0x30, 0x8e, 0x69, 0xe6, 0xc7, 0x62, 0xb3, 0xa7, 0x4a, + 0xbc, 0xa2, 0x92, 0x6a, 0x06, 0xf2, 0x04, 0xe6, 0x0a, 0x76, 0xab, 0xd6, 0x32, 0xdc, 0x26, 0xd5, + 0xc8, 0xb9, 0xac, 0x41, 0xf3, 0x63, 0x2e, 0xa0, 0x54, 0xe9, 0x01, 0xec, 0x62, 0xc4, 0x38, 0x93, + 0x4c, 0x70, 0x75, 0x71, 0xa3, 0xbd, 0xe8, 0x25, 0xa1, 0x77, 0x13, 0xa5, 0x95, 0x0c, 0x62, 0xc3, + 0xfc, 0xbe, 0x9e, 0xad, 0xa2, 0xae, 0xd3, 0x31, 0x26, 0x27, 0xd0, 0x28, 0xcf, 0x47, 0x89, 0xb4, + 0xcc, 0x96, 0xe9, 0x36, 0xda, 0x5b, 0xde, 0xbd, 0xe6, 0x78, 0x55, 0x25, 0x5e, 0xa5, 0x74, 0x8f, + 0xcb, 0x74, 0x44, 0xab, 0x64, 0xc4, 0x85, 0xa5, 0x83, 0x41, 0x22, 0x52, 0xb9, 0x13, 0x74, 0x4f, + 0x91, 0x62, 0x94, 0x59, 0x33, 0x2d, 0xd3, 0xad, 0xd3, 0xbb, 0x61, 0xf2, 0x18, 0x66, 0xf7, 0x19, + 0x0f, 0xfa, 0x16, 0xb4, 0x0c, 0x77, 0x9e, 0x16, 0x80, 0x38, 0xd0, 0xdc, 0xfb, 0x9a, 0x27, 0x62, + 0xfa, 0x5e, 0xca, 0xd4, 0x6a, 0xa8, 0xb1, 0xdc, 0x8a, 0xd9, 0xef, 0x60, 0xf9, 0xae, 0x88, 0xdc, + 0xc5, 0x33, 0x1c, 0x95, 0x2e, 0x9e, 0xe1, 0x28, 0xe7, 0x3f, 0x0f, 0xfa, 0x43, 0xd4, 0xed, 0x17, + 0x60, 0xbb, 0xb6, 0x65, 0x38, 0x7b, 0xb0, 0xa0, 0x3b, 0xd2, 0x8e, 0x4e, 0xac, 0xc0, 0x84, 0x8c, + 0xda, 0xa4, 0x0c, 0xe7, 0x1b, 0x2c, 0x51, 0x0c, 0x7a, 0xfb, 0xac, 0x8f, 0xf7, 
0xee, 0x92, 0xf2,
+	0x81, 0xf5, 0xf1, 0x38, 0x90, 0xa7, 0x63, 0x1f, 0x34, 0x26, 0xdb, 0x30, 0x4b, 0x03, 0x1e, 0xa3,
+	0x65, 0x2a, 0x3b, 0x9f, 0x4e, 0x71, 0x40, 0x5d, 0x92, 0xe7, 0xd2, 0xa2, 0xc4, 0x79, 0x0b, 0xf5,
+	0x71, 0x2c, 0xdf, 0xa2, 0xa3, 0x28, 0xca, 0xb0, 0xd8, 0x48, 0x93, 0x6a, 0x94, 0xc7, 0x0f, 0x91,
+	0xc7, 0xfa, 0x6a, 0x93, 0x6a, 0xe4, 0xac, 0xc3, 0xf2, 0x8d, 0x72, 0x3d, 0x03, 0x02, 0x33, 0xbb,
+	0x81, 0x0c, 0x14, 0x43, 0x93, 0xaa, 0xb3, 0xb3, 0x00, 0x8d, 0x63, 0xc6, 0xcb, 0x97, 0xe2, 0x2c,
+	0x42, 0xf3, 0x58, 0xf0, 0xf1, 0x43, 0x68, 0xff, 0x34, 0xa1, 0x7e, 0x78, 0xd8, 0xe9, 0xa4, 0xac,
+	0x17, 0x23, 0xf9, 0x61, 0x00, 0x99, 0x7c, 0x35, 0xe4, 0xf5, 0x94, 0xae, 0xee, 0x7d, 0x94, 0xf6,
+	0x9b, 0xff, 0xac, 0xd2, 0x4d, 0x9c, 0xc0, 0xac, 0x72, 0x96, 0x3c, 0x7b, 0xe0, 0x36, 0xdb, 0xee,
+	0xbf, 0x13, 0x35, 0x77, 0x17, 0xe6, 0xcb, 0xa1, 0x91, 0x8d, 0xa9, 0xf2, 0x6e, 0xed, 0x84, 0xfd,
+	0xfc, 0x41, 0xb9, 0xfa, 0x92, 0x4f, 0x30, 0x93, 0x4f, 0x9c, 0xac, 0x4f, 0x29, 0xaa, 0x58, 0x62,
+	0x4f, 0xeb, 0xb3, 0xea, 0x55, 0xa7, 0x79, 0x71, 0xbd, 0x6a, 0xfc, 0xba, 0x5e, 0x35, 0x7e, 0x5f,
+	0xaf, 0x1a, 0xe1, 0x9c, 0xfa, 0x2f, 0xbe, 0xfa, 0x13, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x21, 0xd1,
+	0x98, 0xa0, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
new file mode 100644
index 0000000000..55c05e4397
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
@@ -0,0 +1,60 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.frontend;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service LLBBridge {
+	rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
+	rpc Solve(SolveRequest) returns (SolveResponse);
+	rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
+	rpc Ping(PingRequest) returns (PongResponse);
+}
+
+message ResolveImageConfigRequest {
+	string Ref = 1;
+}
+
+message ResolveImageConfigResponse {
+	string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	bytes Config = 2;
+}
+
+message SolveRequest {
+	pb.Definition Definition = 1;
+	string Frontend = 2;
+	map<string, string> FrontendOpt = 3;
+	repeated string ImportCacheRefs = 4;
+	bool Final = 10;
+	bytes ExporterAttr = 11;
+}
+
+message SolveResponse {
+	string Ref = 1; // can be used by readfile request
+	bytes ExporterAttr = 2;
+}
+
+message ReadFileRequest {
+	string Ref = 1;
+	string FilePath = 2;
+	FileRange Range = 3;
+}
+
+message FileRange {
+	int64 Offset = 1;
+	int64 Length = 2;
+}
+
+message ReadFileResponse {
+	bytes Data = 1;
+}
+
+message PingRequest{
+}
+message PongResponse{
+}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
new file mode 100644
index 0000000000..4ab07c6d4a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1_frontend
+
+//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto
diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go
new file mode 100644
index 0000000000..2b96a7cef1
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/auth/auth.go
@@ -0,0 +1,26 @@
+package auth
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/session"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) {
+	return func(host string) (string, string, error) {
+		client := NewAuthClient(c.Conn())
+
+		resp, err := client.Credentials(ctx, &CredentialsRequest{
+			Host: host,
+		})
+		if err != nil {
+			if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
+				return "", "", nil
+			}
+			return "", "", err
+		}
+		return resp.Username, resp.Secret, nil
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go
new file mode 100644
index 0000000000..8993b85b96
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go
@@ -0,0 +1,673 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+/*
+	Package auth is a generated protocol buffer package.
+
+	It is generated from these files:
+		auth.proto
+
+	It has these top-level messages:
+		CredentialsRequest
+		CredentialsResponse
+*/
+package auth
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import strings "strings"
+import reflect "reflect"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CredentialsRequest struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` +} + +func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} } +func (*CredentialsRequest) ProtoMessage() {} +func (*CredentialsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + +func (m *CredentialsRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +type CredentialsResponse struct { + Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` +} + +func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} } +func (*CredentialsResponse) ProtoMessage() {} +func (*CredentialsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } + +func (m *CredentialsResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *CredentialsResponse) GetSecret() string { + if m != nil { + return m.Secret + } + return "" +} + +func init() { + proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest") + proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse") +} +func (this *CredentialsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CredentialsRequest) + if !ok { + that2, ok := that.(CredentialsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Host != that1.Host { + return false + } + return true +} +func (this *CredentialsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CredentialsResponse) + if !ok { + that2, ok := that.(CredentialsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Username != that1.Username { + return false + } + if this.Secret != that1.Secret { + return false + } + return true +} +func (this *CredentialsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&auth.CredentialsRequest{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CredentialsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&auth.CredentialsResponse{") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAuth(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Auth service + +type AuthClient interface { + Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) { + out := new(CredentialsResponse) + err := grpc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Auth service + +type AuthServer interface { + Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Credentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.filesync.v1.Auth/Credentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Credentials", + Handler: _Auth_Credentials_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth.proto", +} + +func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Host) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + } + return i, nil +} + +func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Username) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + if len(m.Secret) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret))) + i += copy(dAtA[i:], m.Secret) + } + return i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CredentialsRequest) Size() (n int) { + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *CredentialsResponse) Size() (n int) { + var l int + _ = l + l = len(m.Username) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Secret) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func 
sovAuth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CredentialsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsRequest{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *CredentialsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsResponse{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func valueToStringAuth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAuth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAuth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 224 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9, + 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc, + 0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 
0x12, 0x72, 0x2e, 0x4a, 0x4d, + 0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, + 0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, + 0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84, + 0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1, + 0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1, + 0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61, + 0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0x8c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, + 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, + 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, + 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x0b, 0x28, 0x90, 0x92, 0xd8, 0xc0, + 0xa1, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x73, 0xf3, 0xd5, 0x33, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.proto b/vendor/github.com/moby/buildkit/session/auth/auth.proto new file mode 100644 index 0000000000..5933127479 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/auth.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package moby.filesync.v1; + +option go_package = "auth"; + +service Auth{ + rpc Credentials(CredentialsRequest) returns (CredentialsResponse); +} + + +message CredentialsRequest { + string Host = 1; +} + +message CredentialsResponse { + string Username = 1; + string Secret = 2; +} diff --git a/vendor/github.com/moby/buildkit/session/auth/generate.go b/vendor/github.com/moby/buildkit/session/auth/generate.go new file mode 100644 index 0000000000..687aa7cc0b --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/generate.go @@ -0,0 +1,3 @@ +package auth + +//go:generate protoc --gogoslick_out=plugins=grpc:. 
auth.proto diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go index 7f8bf3c0df..d0f8e76df3 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -12,10 +12,11 @@ import ( "google.golang.org/grpc" ) -func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error { +func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error { return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{ ExcludePatterns: excludes, IncludePatterns: includes, + FollowPaths: followPaths, Map: _map, }, progress) } diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index 232a696d73..ee2668f06b 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -18,6 +18,7 @@ const ( keyOverrideExcludes = "override-excludes" keyIncludePatterns = "include-patterns" keyExcludePatterns = "exclude-patterns" + keyFollowPaths = "followpaths" keyDirName = "dir-name" ) @@ -87,6 +88,8 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr } includes := opts[keyIncludePatterns] + followPaths := opts[keyFollowPaths] + var progress progressCb if sp.p != nil { progress = sp.p @@ -98,7 +101,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr doneCh = sp.doneCh sp.doneCh = nil } - err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map) + err := pr.sendFn(stream, dir.Dir, includes, excludes, followPaths, progress, dir.Map) if doneCh != nil { if err != nil { doneCh <- err @@ -117,7 +120,7 @@ type progressCb func(int, bool) type protocol struct { name string - sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error + sendFn func(stream grpc.Stream, srcDir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error } @@ -142,6 +145,7 @@ type FSSendRequestOpt struct { Name string IncludePatterns []string ExcludePatterns []string + FollowPaths []string OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory DestDir string CacheUpdater CacheUpdater @@ -181,6 +185,10 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { opts[keyExcludePatterns] = opt.ExcludePatterns } + if opt.FollowPaths != nil { + opts[keyFollowPaths] = opt.FollowPaths + } + opts[keyDirName] = []string{opt.Name} ctx, cancel := context.WithCancel(ctx) @@ -261,7 +269,7 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres return err } - return sendDiffCopy(cc, srcPath, nil, nil, progress, nil) + return sendDiffCopy(cc, srcPath, nil, nil, nil, progress, nil) } func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) { diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go new file mode 100644 index 0000000000..151ab5498f --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -0,0 +1,156 @@ +package grpchijack + +import ( + "context" + "io" + "net" + "strings" + "sync" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/session" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func Dialer(api controlapi.ControlClient) session.Dialer { + return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + + meta = lowerHeaders(meta) + + md := metadata.MD(meta) + + ctx = metadata.NewOutgoingContext(ctx, md) + + stream, err := api.Session(ctx) + if err != nil { + return nil, err + } + + c, _ := streamToConn(stream) + return c, nil + } +} + +func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) { + closeCh := make(chan struct{}) + c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} + return c, closeCh +} + +type conn struct { + stream grpc.Stream + buf []byte + lastBuf []byte + + closedOnce sync.Once + readMu sync.Mutex + err error + closeCh chan struct{} +} + +func (c *conn) Read(b []byte) (n int, err error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + if c.lastBuf != nil { + n := copy(b, c.lastBuf) + c.lastBuf = c.lastBuf[n:] + if len(c.lastBuf) == 0 { + c.lastBuf = nil + } + return n, nil + } + m := new(controlapi.BytesMessage) + m.Data = c.buf + + if err := c.stream.RecvMsg(m); err != nil { + return 0, err + } + c.buf = m.Data[:cap(m.Data)] + + n = copy(b, m.Data) + if n < len(m.Data) { + c.lastBuf = m.Data[n:] + } + + return n, nil +} + +func (c *conn) Write(b []byte) (int, error) { + m := &controlapi.BytesMessage{Data: b} + if err := c.stream.SendMsg(m); err != nil { + return 0, err + } + return len(b), nil +} + +func (c *conn) Close() (err error) { + c.closedOnce.Do(func() { + defer func() { + close(c.closeCh) + }() + + if cs, ok := c.stream.(grpc.ClientStream); ok { + err = cs.CloseSend() + if err != nil { + return + } + } + + c.readMu.Lock() + for { + m := new(controlapi.BytesMessage) + m.Data = c.buf + err = c.stream.RecvMsg(m) + if err != nil { + if err != io.EOF { + return + } + err = nil + break + } + c.buf = m.Data[:cap(m.Data)] + c.lastBuf = append(c.lastBuf, c.buf...) 
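+ // payloads drained here accumulate in lastBuf so a concurrent Read can + // still consume data that arrived before the stream reported EOF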
+ } + c.readMu.Unlock() + + }) + return nil +} + +func (c *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) SetDeadline(t time.Time) error { + return nil +} +func (c *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (c *conn) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "tcp" +} + +func (d dummyAddr) String() string { + return "localhost" +} + +func lowerHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[strings.ToLower(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go new file mode 100644 index 0000000000..6e34b2164e --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go @@ -0,0 +1,14 @@ +package grpchijack + +import ( + "net" + + controlapi "github.com/moby/buildkit/api/services/control" + "google.golang.org/grpc/metadata" +) + +func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) { + md, _ := metadata.FromIncomingContext(stream.Context()) + c, closeCh := streamToConn(stream) + return c, closeCh, md +} diff --git a/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go new file mode 100644 index 0000000000..e91241b7b6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go @@ -0,0 +1,129 @@ +package blobmapping + +import ( + "context" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/snapshots" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/snapshot" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const blobKey = "blobmapping.blob" + +type Opt struct { + Content content.Store + Snapshotter snapshot.SnapshotterBase + MetadataStore *metadata.Store +} + +type Info struct { + snapshots.Info + Blob string +} + +type DiffPair struct { + Blobsum digest.Digest + DiffID digest.Digest +} + +// this snapshotter keeps an internal mapping between a snapshot and a blob + +type Snapshotter struct { + snapshot.SnapshotterBase + opt Opt +} + +func NewSnapshotter(opt Opt) snapshot.Snapshotter { + s := &Snapshotter{ + SnapshotterBase: opt.Snapshotter, + opt: opt, + } + + return s +} + +// Remove also removes a reference to a blob. 
If it is the last reference then it deletes the blob as well +// Remove is not safe to be called concurrently +func (s *Snapshotter) Remove(ctx context.Context, key string) error { + _, blob, err := s.GetBlob(ctx, key) + if err != nil { + return err + } + + blobs, err := s.opt.MetadataStore.Search(index(blob)) + if err != nil { + return err + } + + if err := s.SnapshotterBase.Remove(ctx, key); err != nil { + return err + } + + if len(blobs) == 1 && blobs[0].ID() == key { // last snapshot + if err := s.opt.Content.Delete(ctx, blob); err != nil { + logrus.Errorf("failed to delete blob %v: %+v", blob, err) + } + } + return nil +} + +func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + u, err := s.SnapshotterBase.Usage(ctx, key) + if err != nil { + return snapshots.Usage{}, err + } + _, blob, err := s.GetBlob(ctx, key) + if err != nil { + return u, err + } + if blob != "" { + info, err := s.opt.Content.Info(ctx, blob) + if err != nil { + return u, err + } + (&u).Add(snapshots.Usage{Size: info.Size, Inodes: 1}) + } + return u, nil +} + +func (s *Snapshotter) GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) { + md, _ := s.opt.MetadataStore.Get(key) + v := md.Get(blobKey) + if v == nil { + return "", "", nil + } + var blob DiffPair + if err := v.Unmarshal(&blob); err != nil { + return "", "", err + } + return blob.DiffID, blob.Blobsum, nil +} + +// SetBlob associates a blob with the snapshot key. +// Checks that there is a blob in the content store. +// If the same blob has already been set then this is a noop. +func (s *Snapshotter) SetBlob(ctx context.Context, key string, diffID, blobsum digest.Digest) error { + _, err := s.opt.Content.Info(ctx, blobsum) + if err != nil { + return err + } + md, _ := s.opt.MetadataStore.Get(key) + + v, err := metadata.NewValue(DiffPair{DiffID: diffID, Blobsum: blobsum}) + if err != nil { + return err + } + v.Index = index(blobsum) + + return md.Update(func(b *bolt.Bucket) error { + return md.SetValue(b, blobKey, v) + }) +} + +func index(blob digest.Digest) string { + return "blobmap::" + blob.String() +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter.go b/vendor/github.com/moby/buildkit/snapshot/localmounter.go new file mode 100644 index 0000000000..18e2411cfc --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter.go @@ -0,0 +1,72 @@ +package snapshot + +import ( + "io/ioutil" + "os" + "sync" + + "github.com/containerd/containerd/mount" + "github.com/pkg/errors" +) + +type Mounter interface { + Mount() (string, error) + Unmount() error +} + +// LocalMounter is a helper for mounting a Mountable to a temporary path. In +// addition it can mount binds without privileges +func LocalMounter(mountable Mountable) Mounter { + return &localMounter{mountable: mountable} +} + +// LocalMounterWithMounts is a helper for mounting to a temporary path. 
In +// addition it can mount binds without privileges +func LocalMounterWithMounts(mounts []mount.Mount) Mounter { + return &localMounter{mounts: mounts} +} + +type localMounter struct { + mu sync.Mutex + mounts []mount.Mount + mountable Mountable + target string +} + +func (lm *localMounter) Mount() (string, error) { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.mounts == nil { + mounts, err := lm.mountable.Mount() + if err != nil { + return "", err + } + lm.mounts = mounts + } + + if len(lm.mounts) == 1 && (lm.mounts[0].Type == "bind" || lm.mounts[0].Type == "rbind") { + ro := false + for _, opt := range lm.mounts[0].Options { + if opt == "ro" { + ro = true + break + } + } + if !ro { + return lm.mounts[0].Source, nil + } + } + + dir, err := ioutil.TempDir("", "buildkit-mount") + if err != nil { + return "", errors.Wrap(err, "failed to create temp dir") + } + + if err := mount.All(lm.mounts, dir); err != nil { + os.RemoveAll(dir) + return "", errors.Wrapf(err, "failed to mount %s: %+v", dir, lm.mounts) + } + lm.target = dir + return dir, nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go new file mode 100644 index 0000000000..c44e435e99 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package snapshot + +import ( + "os" + "syscall" + + "github.com/containerd/containerd/mount" +) + +func (lm *localMounter) Unmount() error { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.target != "" { + if err := mount.Unmount(lm.target, syscall.MNT_DETACH); err != nil { + return err + } + os.RemoveAll(lm.target) + lm.target = "" + } + + if lm.mountable != nil { + return lm.mountable.Release() + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go new file mode 100644 index 0000000000..4e1287b0d8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go @@ -0,0 +1,26 @@ +package snapshot + +import ( + "os" + + "github.com/containerd/containerd/mount" +) + +func (lm *localMounter) Unmount() error { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.target != "" { + if err := mount.Unmount(lm.target, 0); err != nil { + return err + } + os.RemoveAll(lm.target) + lm.target = "" + } + + if lm.mountable != nil { + return lm.mountable.Release() + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go new file mode 100644 index 0000000000..ad7fcaf2dc --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go @@ -0,0 +1,137 @@ +package snapshot + +import ( + "context" + "sync" + + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/snapshots" + digest "github.com/opencontainers/go-digest" +) + +type Mountable interface { + // ID() string + Mount() ([]mount.Mount, error) + Release() error +} + +type SnapshotterBase interface { + Mounts(ctx context.Context, key string) (Mountable, error) + Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error + View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) + + Stat(ctx context.Context, key string) (snapshots.Info, error) + Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) + Usage(ctx context.Context, key string) (snapshots.Usage, error) + 
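// Commit turns the active snapshot identified by key into a committed snapshot with the given name +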
Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error + Remove(ctx context.Context, key string) error + Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error + Close() error +} + +// Snapshotter defines the interface that any snapshot implementation should satisfy +type Snapshotter interface { + Blobmapper + SnapshotterBase +} + +type Blobmapper interface { + GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) + SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error +} + +func FromContainerdSnapshotter(s snapshots.Snapshotter) SnapshotterBase { + return &fromContainerd{Snapshotter: s} +} + +type fromContainerd struct { + snapshots.Snapshotter +} + +func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) { + mounts, err := s.Snapshotter.Mounts(ctx, key) + if err != nil { + return nil, err + } + return &staticMountable{mounts}, nil +} +func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error { + _, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) + return err +} +func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) { + mounts, err := s.Snapshotter.View(ctx, key, parent, opts...) + if err != nil { + return nil, err + } + return &staticMountable{mounts}, nil +} + +type staticMountable struct { + mounts []mount.Mount +} + +func (m *staticMountable) Mount() ([]mount.Mount, error) { + return m.mounts, nil +} + +func (cm *staticMountable) Release() error { + return nil +} + +// NewContainerdSnapshotter converts a Snapshotter to a containerd snapshotter +func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) { + cs := &containerdSnapshotter{Snapshotter: s} + return cs, cs.release +} + +type containerdSnapshotter struct { + mu sync.Mutex + releasers []func() error + Snapshotter +} + +func (cs *containerdSnapshotter) release() error { + cs.mu.Lock() + defer cs.mu.Unlock() + var err error + for _, f := range cs.releasers { + if err1 := f(); err1 != nil && err == nil { + err = err1 + } + } + return err +} + +func (cs *containerdSnapshotter) returnMounts(mf Mountable) ([]mount.Mount, error) { + mounts, err := mf.Mount() + if err != nil { + return nil, err + } + cs.mu.Lock() + cs.releasers = append(cs.releasers, mf.Release) + cs.mu.Unlock() + return mounts, nil +} + +func (cs *containerdSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + mf, err := cs.Snapshotter.Mounts(ctx, key) + if err != nil { + return nil, err + } + return cs.returnMounts(mf) +} + +func (cs *containerdSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + if err := cs.Snapshotter.Prepare(ctx, key, parent, opts...); err != nil { + return nil, err + } + return cs.Mounts(ctx, key) +} +func (cs *containerdSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + mf, err := cs.Snapshotter.View(ctx, key, parent, opts...) 
+ if err != nil { + return nil, err + } + return cs.returnMounts(mf) +} diff --git a/vendor/github.com/moby/buildkit/solver/boltdbcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/boltdbcachestorage/storage.go new file mode 100644 index 0000000000..8f89044912 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/boltdbcachestorage/storage.go @@ -0,0 +1,449 @@ +package boltdbcachestorage + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/boltdb/bolt" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const ( + resultBucket = "_result" + linksBucket = "_links" + byResultBucket = "_byresult" + backlinksBucket = "_backlinks" +) + +type Store struct { + db *bolt.DB +} + +func NewStore(dbPath string) (*Store, error) { + db, err := bolt.Open(dbPath, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) + } + if err := db.Update(func(tx *bolt.Tx) error { + for _, b := range []string{resultBucket, linksBucket, byResultBucket, backlinksBucket} { + if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil { + return err + } + } + return nil + }); err != nil { + return nil, err + } + db.NoSync = true + return &Store{db: db}, nil +} + +func (s *Store) Exists(id string) bool { + exists := false + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)).Bucket([]byte(id)) + exists = b != nil + return nil + }) + if err != nil { + return false + } + return exists +} + +func (s *Store) Walk(fn func(id string) error) error { + ids := make([]string, 0) + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + ids = append(ids, string(k)) + } + } + return nil + }); err != nil { + return err + } + for _, id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *Store) WalkResults(id string, fn func(solver.CacheResult) error) error { + var list []solver.CacheResult + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(resultBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + return b.ForEach(func(k, v []byte) error { + var res solver.CacheResult + if err := json.Unmarshal(v, &res); err != nil { + return err + } + list = append(list, res) + return nil + }) + }); err != nil { + return err + } + for _, res := range list { + if err := fn(res); err != nil { + return err + } + } + return nil +} + +func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) { + var res solver.CacheResult + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(resultBucket)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + b = b.Bucket([]byte(id)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + + v := b.Get([]byte(resultID)) + if v == nil { + return errors.WithStack(solver.ErrNotFound) + } + + return json.Unmarshal(v, &res) + }); err != nil { + return solver.CacheResult{}, err + } + return res, nil +} + +func (s *Store) AddResult(id string, res solver.CacheResult) error { + return s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + dt, err := json.Marshal(res) + if err != nil { + return err + } + if err := b.Put([]byte(res.ID), dt); err != nil { + return err + } + 
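// also index res.ID back to this cache key in byResultBucket so Release and +// WalkIDsByResult can walk from a result to the keys that produced it +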
b, err = tx.Bucket([]byte(byResultBucket)).CreateBucketIfNotExists([]byte(res.ID)) + if err != nil { + return err + } + if err := b.Put([]byte(id), []byte{}); err != nil { + return err + } + + return nil + }) +} + +func (s *Store) WalkIDsByResult(resultID string, fn func(string) error) error { + ids := map[string]struct{}{} + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(byResultBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(resultID)) + if b == nil { + return nil + } + return b.ForEach(func(k, v []byte) error { + ids[string(k)] = struct{}{} + return nil + }) + }); err != nil { + return err + } + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *Store) Release(resultID string) error { + return s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(byResultBucket)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + b = b.Bucket([]byte(resultID)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + if err := b.ForEach(func(k, v []byte) error { + return s.releaseHelper(tx, string(k), resultID) + }); err != nil { + return err + } + return nil + }) +} + +func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error { + results := tx.Bucket([]byte(resultBucket)).Bucket([]byte(id)) + if results == nil { + return nil + } + + if err := results.Delete([]byte(resultID)); err != nil { + return err + } + + ids := tx.Bucket([]byte(byResultBucket)) + + ids = ids.Bucket([]byte(resultID)) + if ids == nil { + return nil + } + + if err := ids.Delete([]byte(resultID)); err != nil { + return err + } + + if isEmptyBucket(ids) { + if err := tx.Bucket([]byte(byResultBucket)).DeleteBucket([]byte(resultID)); err != nil { + return err + } + } + + links := tx.Bucket([]byte(resultBucket)) + if results == nil { + return nil + } + links = links.Bucket([]byte(id)) + + return s.emptyBranchWithParents(tx, []byte(id)) +} + +func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error { + results := tx.Bucket([]byte(resultBucket)).Bucket(id) + if results == nil { + return nil + } + + isEmptyLinks := true + links := tx.Bucket([]byte(linksBucket)).Bucket(id) + if links != nil { + isEmptyLinks = isEmptyBucket(links) + } + + if !isEmptyBucket(results) || !isEmptyLinks { + return nil + } + + if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil { + if err := backlinks.ForEach(func(k, v []byte) error { + if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil { + if err := subLinks.ForEach(func(k, v []byte) error { + parts := bytes.Split(k, []byte("@")) + if len(parts) != 2 { + return errors.Errorf("invalid key %s", k) + } + if bytes.Equal(id, parts[1]) { + return subLinks.Delete(k) + } + return nil + }); err != nil { + return err + } + + if isEmptyBucket(subLinks) { + if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil { + return err + } + } + } + return s.emptyBranchWithParents(tx, k) + }); err != nil { + return err + } + if err := tx.Bucket([]byte(backlinksBucket)).DeleteBucket(id); err != nil { + return err + } + } + return nil +} + +func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) error { + return s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + + if err := b.Put(bytes.Join([][]byte{dt, []byte(target)}, []byte("@")), 
[]byte{}); err != nil { + return err + } + + b, err = tx.Bucket([]byte(backlinksBucket)).CreateBucketIfNotExists([]byte(target)) + if err != nil { + return err + } + + if err := b.Put([]byte(id), []byte{}); err != nil { + return err + } + + return nil + }) +} + +func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { + var links []string + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + index := bytes.Join([][]byte{dt, {}}, []byte("@")) + c := b.Cursor() + k, _ := c.Seek([]byte(index)) + for { + if k != nil && bytes.HasPrefix(k, index) { + target := bytes.TrimPrefix(k, index) + links = append(links, string(target)) + k, _ = c.Next() + } else { + break + } + } + + return nil + }); err != nil { + return err + } + for _, l := range links { + if err := fn(l); err != nil { + return err + } + } + return nil +} + +func (s *Store) HasLink(id string, link solver.CacheInfoLink, target string) bool { + var v bool + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + v = b.Get(bytes.Join([][]byte{dt, []byte(target)}, []byte("@"))) != nil + return nil + }); err != nil { + return false + } + return v +} + +func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { + var outIDs []string + var outLinks []solver.CacheInfoLink + + if err := s.db.View(func(tx *bolt.Tx) error { + links := tx.Bucket([]byte(linksBucket)) + if links == nil { + return nil + } + backLinks := tx.Bucket([]byte(backlinksBucket)) + if backLinks == nil { + return nil + } + b := backLinks.Bucket([]byte(id)) + if b == nil { + return nil + } + + if err := b.ForEach(func(bid, v []byte) error { + b = links.Bucket(bid) + if b == nil { + return nil + } + if err := b.ForEach(func(k, v []byte) error { + parts := bytes.Split(k, []byte("@")) + if len(parts) == 2 { + if string(parts[1]) != id { + return nil + } + var l solver.CacheInfoLink + if err := json.Unmarshal(parts[0], &l); err != nil { + return err + } + l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output))) + l.Output = 0 + outIDs = append(outIDs, string(bid)) + outLinks = append(outLinks, l) + } + return nil + }); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + return err + } + + for i := range outIDs { + if err := fn(outIDs[i], outLinks[i]); err != nil { + return err + } + } + return nil +} + +func isEmptyBucket(b *bolt.Bucket) bool { + if b == nil { + return true + } + k, _ := b.Cursor().First() + return k == nil +} diff --git a/vendor/github.com/moby/buildkit/solver/cachekey.go b/vendor/github.com/moby/buildkit/solver/cachekey.go new file mode 100644 index 0000000000..3749af0ab3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachekey.go @@ -0,0 +1,66 @@ +package solver + +import ( + "sync" + + digest "github.com/opencontainers/go-digest" +) + +// NewCacheKey creates a new cache key for a specific output index +func NewCacheKey(dgst digest.Digest, output Index) *CacheKey { + return &CacheKey{ + ID: rootKey(dgst, output).String(), + digest: dgst, + output: output, + ids: map[*cacheManager]string{}, + } +} + +// 
CacheKeyWithSelector combines a cache key with an optional selector digest. +// Used to limit the matches for dependency cache key. +type CacheKeyWithSelector struct { + Selector digest.Digest + CacheKey ExportableCacheKey +} + +type CacheKey struct { + mu sync.RWMutex + + ID string + deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey + digest digest.Digest + output Index + ids map[*cacheManager]string + + indexIDs []string +} + +func (ck *CacheKey) Deps() [][]CacheKeyWithSelector { + ck.mu.RLock() + defer ck.mu.RUnlock() + deps := make([][]CacheKeyWithSelector, len(ck.deps)) + for i := range ck.deps { + deps[i] = append([]CacheKeyWithSelector(nil), ck.deps[i]...) + } + return deps +} + +func (ck *CacheKey) Digest() digest.Digest { + return ck.digest +} +func (ck *CacheKey) Output() Index { + return ck.output +} + +func (ck *CacheKey) clone() *CacheKey { + nk := &CacheKey{ + ID: ck.ID, + digest: ck.digest, + output: ck.output, + ids: map[*cacheManager]string{}, + } + for cm, id := range ck.ids { + nk.ids[cm] = id + } + return nk +} diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go new file mode 100644 index 0000000000..12a923503d --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachemanager.go @@ -0,0 +1,270 @@ +package solver + +import ( + "context" + "fmt" + "sync" + + "github.com/moby/buildkit/identity" + digest "github.com/opencontainers/go-digest" +) + +type CacheID string + +func NewInMemoryCacheManager() CacheManager { + return NewCacheManager(identity.NewID(), NewInMemoryCacheStorage(), NewInMemoryResultStorage()) +} + +func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStorage) CacheManager { + cm := &cacheManager{ + id: id, + backend: storage, + results: results, + } + + storage.Walk(func(id string) error { + return storage.WalkResults(id, func(cr CacheResult) error { + if !results.Exists(cr.ID) { + storage.Release(cr.ID) + } + return nil + }) + }) + + return cm +} + +type cacheManager struct { + mu sync.RWMutex + id string + + backend CacheKeyStorage + results CacheResultStorage +} + +func (c *cacheManager) ID() string { + return c.id +} + +func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) ([]*CacheKey, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + type dep struct { + results map[string]struct{} + key CacheKeyWithSelector + } + + allDeps := make([]dep, 0, len(deps)) + for _, k := range deps { + allDeps = append(allDeps, dep{key: k, results: map[string]struct{}{}}) + } + + allRes := map[string]*CacheKey{} + for _, d := range allDeps { + if err := c.backend.WalkLinks(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{input, output, dgst, d.key.Selector}, func(id string) error { + d.results[id] = struct{}{} + if _, ok := allRes[id]; !ok { + allRes[id] = c.newKeyWithID(id, dgst, output) + } + return nil + }); err != nil { + return nil, err + } + } + + // link the results against the keys that didn't exist + for id, key := range allRes { + for _, d := range allDeps { + if _, ok := d.results[id]; !ok { + if err := c.backend.AddLink(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{ + Input: input, + Output: output, + Digest: dgst, + Selector: d.key.Selector, + }, c.getID(key)); err != nil { + return nil, err + } + } + } + } + + if len(deps) == 0 { + if !c.backend.Exists(rootKey(dgst, output).String()) { + return nil, nil + } + return []*CacheKey{c.newRootKey(dgst, output)}, nil + } + + keys := make([]*CacheKey, 0, 
len(deps)) + for _, k := range allRes { + keys = append(keys, k) + } + return keys, nil +} + +func (c *cacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { + outs := make([]*CacheRecord, 0) + if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error { + if c.results.Exists(r.ID) { + outs = append(outs, &CacheRecord{ + ID: r.ID, + cacheManager: c, + key: ck, + CreatedAt: r.CreatedAt, + }) + } else { + c.backend.Release(r.ID) + } + return nil + }); err != nil { + return nil, err + } + return outs, nil +} + +func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + res, err := c.backend.Load(c.getID(rec.key), rec.ID) + if err != nil { + return nil, err + } + + return c.results.Load(ctx, res) +} + +func (c *cacheManager) Save(k *CacheKey, r Result) (*ExportableCacheKey, error) { + c.mu.Lock() + defer c.mu.Unlock() + + res, err := c.results.Save(r) + if err != nil { + return nil, err + } + + if err := c.backend.AddResult(c.getID(k), res); err != nil { + return nil, err + } + + if err := c.ensurePersistentKey(k); err != nil { + return nil, err + } + + rec := &CacheRecord{ + ID: res.ID, + cacheManager: c, + key: k, + CreatedAt: res.CreatedAt, + } + + return &ExportableCacheKey{ + CacheKey: k, + Exporter: &exporter{k: k, record: rec}, + }, nil +} + +func newKey() *CacheKey { + return &CacheKey{ids: map[*cacheManager]string{}} +} + +func (c *cacheManager) newKeyWithID(id string, dgst digest.Digest, output Index) *CacheKey { + k := newKey() + k.digest = dgst + k.output = output + k.ID = id + k.ids[c] = id + return k +} + +func (c *cacheManager) newRootKey(dgst digest.Digest, output Index) *CacheKey { + return c.newKeyWithID(rootKey(dgst, output).String(), dgst, output) +} + +func (c *cacheManager) getID(k *CacheKey) string { + k.mu.Lock() + id, ok := k.ids[c] + if ok { + k.mu.Unlock() + return id + } + if len(k.deps) == 0 { + k.ids[c] = k.ID + k.mu.Unlock() + return k.ID + } + id = c.getIDFromDeps(k) + k.ids[c] = id + k.mu.Unlock() + return id +} + +func (c *cacheManager) ensurePersistentKey(k *CacheKey) error { + id := c.getID(k) + for i, deps := range k.Deps() { + for _, ck := range deps { + l := CacheInfoLink{ + Input: Index(i), + Output: Index(k.Output()), + Digest: k.Digest(), + Selector: ck.Selector, + } + ckID := c.getID(ck.CacheKey.CacheKey) + if !c.backend.HasLink(ckID, l, id) { + if err := c.ensurePersistentKey(ck.CacheKey.CacheKey); err != nil { + return err + } + if err := c.backend.AddLink(ckID, l, id); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cacheManager) getIDFromDeps(k *CacheKey) string { + matches := map[string]struct{}{} + + for i, deps := range k.deps { + if i == 0 || len(matches) > 0 { + for _, ck := range deps { + m2 := make(map[string]struct{}) + if err := c.backend.WalkLinks(c.getID(ck.CacheKey.CacheKey), CacheInfoLink{ + Input: Index(i), + Output: Index(k.Output()), + Digest: k.Digest(), + Selector: ck.Selector, + }, func(id string) error { + if i == 0 { + matches[id] = struct{}{} + } else { + m2[id] = struct{}{} + } + return nil + }); err != nil { + matches = map[string]struct{}{} + break + } + if i != 0 { + for id := range matches { + if _, ok := m2[id]; !ok { + delete(matches, id) + } + } + } + } + } + } + + for k := range matches { + return k + } + + return identity.NewID() +} + +func rootKey(dgst digest.Digest, output Index) digest.Digest { + return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))) +} diff --git 
a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go new file mode 100644 index 0000000000..65225f757b --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -0,0 +1,51 @@ +package solver + +import ( + "context" + "time" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +var ErrNotFound = errors.Errorf("not found") + +// CacheKeyStorage is interface for persisting cache metadata +type CacheKeyStorage interface { + Exists(id string) bool + Walk(fn func(id string) error) error + + WalkResults(id string, fn func(CacheResult) error) error + Load(id string, resultID string) (CacheResult, error) + AddResult(id string, res CacheResult) error + Release(resultID string) error + WalkIDsByResult(resultID string, fn func(string) error) error + + AddLink(id string, link CacheInfoLink, target string) error + WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error + HasLink(id string, link CacheInfoLink, target string) bool + WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error +} + +// CacheResult is a record for a single solve result +type CacheResult struct { + CreatedAt time.Time + ID string +} + +// CacheInfoLink is a link between two cache keys +type CacheInfoLink struct { + Input Index `json:"Input,omitempty"` + Output Index `json:"Output,omitempty"` + Digest digest.Digest `json:"Digest,omitempty"` + Selector digest.Digest `json:"Selector,omitempty"` +} + +// CacheResultStorage is interface for converting cache metadata result to +// actual solve result +type CacheResultStorage interface { + Save(Result) (CacheResult, error) + Load(ctx context.Context, res CacheResult) (Result, error) + LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) + Exists(id string) bool +} diff --git a/vendor/github.com/moby/buildkit/solver/combinedcache.go b/vendor/github.com/moby/buildkit/solver/combinedcache.go new file mode 100644 index 0000000000..b4205d3ed0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/combinedcache.go @@ -0,0 +1,124 @@ +package solver + +import ( + "context" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager { + return &combinedCacheManager{cms: cms, main: main} +} + +type combinedCacheManager struct { + cms []CacheManager + main CacheManager + id string + idOnce sync.Once +} + +func (cm *combinedCacheManager) ID() string { + cm.idOnce.Do(func() { + ids := make([]string, len(cm.cms)) + for i, c := range cm.cms { + ids[i] = c.ID() + } + cm.id = digest.FromBytes([]byte(strings.Join(ids, ","))).String() + }) + return cm.id +} + +func (cm *combinedCacheManager) Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) { + eg, _ := errgroup.WithContext(context.TODO()) + keys := make(map[string]*CacheKey, len(cm.cms)) + var mu sync.Mutex + for _, c := range cm.cms { + func(c CacheManager) { + eg.Go(func() error { + recs, err := c.Query(inp, inputIndex, dgst, outputIndex) + if err != nil { + return err + } + mu.Lock() + for _, r := range recs { + if _, ok := keys[r.ID]; !ok || c == cm.main { + keys[r.ID] = r + } + } + mu.Unlock() + return nil + }) + }(c) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + out := make([]*CacheKey, 0, len(keys)) + for _, k := range keys { + out = append(out, k) + } + 
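// keys collected from every underlying manager, deduplicated by ID with the +// main manager taking precedence +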
return out, nil +} + +func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) { + res, err := rec.cacheManager.Load(ctx, rec) + if err != nil { + return nil, err + } + if _, err := cm.main.Save(rec.key, res); err != nil { + return nil, err + } + return res, nil +} + +func (cm *combinedCacheManager) Save(key *CacheKey, s Result) (*ExportableCacheKey, error) { + return cm.main.Save(key, s) +} + +func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { + if len(ck.ids) == 0 { + return nil, errors.Errorf("no results") + } + + records := map[string]*CacheRecord{} + var mu sync.Mutex + + eg, _ := errgroup.WithContext(context.TODO()) + for c := range ck.ids { + func(c *cacheManager) { + eg.Go(func() error { + recs, err := c.Records(ck) + if err != nil { + return err + } + mu.Lock() + for _, rec := range recs { + if _, ok := records[rec.ID]; !ok || c == cm.main { + if c == cm.main { + rec.Priority = 1 + } + records[rec.ID] = rec + } + } + mu.Unlock() + return nil + }) + }(c) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + out := make([]*CacheRecord, 0, len(records)) + for _, rec := range records { + out = append(out, rec) + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go new file mode 100644 index 0000000000..1547cf4bd1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -0,0 +1,866 @@ +package solver + +import ( + "context" + "sync" + + "github.com/moby/buildkit/solver/internal/pipe" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type edgeStatusType int + +const ( + edgeStatusInitial edgeStatusType = iota + edgeStatusCacheFast + edgeStatusCacheSlow + edgeStatusComplete +) + +func (t edgeStatusType) String() string { + return []string{"initial", "cache-fast", "cache-slow", "complete"}[t] +} + +func newEdge(ed Edge, op activeOp, index *edgeIndex) *edge { + e := &edge{ + edge: ed, + op: op, + depRequests: map[pipe.Receiver]*dep{}, + keyMap: map[string]*CacheKey{}, + cacheRecords: map[string]*CacheRecord{}, + index: index, + } + return e +} + +type edge struct { + edge Edge + op activeOp + + edgeState + depRequests map[pipe.Receiver]*dep + deps []*dep + + cacheMapReq pipe.Receiver + cacheMapDone bool + cacheMapIndex int + cacheMapDigests []digest.Digest + execReq pipe.Receiver + err error + cacheRecords map[string]*CacheRecord + keyMap map[string]*CacheKey + + noCacheMatchPossible bool + allDepsCompletedCacheFast bool + allDepsCompletedCacheSlow bool + allDepsStateCacheSlow bool + allDepsCompleted bool + hasActiveOutgoing bool + + releaserCount int + keysDidChange bool + index *edgeIndex + + secondaryExporters []expDep +} + +// dep holds state for a dependent edge +type dep struct { + req pipe.Receiver + edgeState + index Index + keyMap map[string]*CacheKey + desiredState edgeStatusType + e *edge + slowCacheReq pipe.Receiver + slowCacheComplete bool + slowCacheFoundKey bool + slowCacheKey *ExportableCacheKey + err error +} + +// expDep holds secondary exporter info for a dependency +type expDep struct { + index int + cacheKey CacheKeyWithSelector +} + +func newDep(i Index) *dep { + return &dep{index: i, keyMap: map[string]*CacheKey{}} +} + +// edgePipe is a pipe for requests between two edges +type edgePipe struct { + *pipe.Pipe + From, Target *edge + mu sync.Mutex +} + +// edgeState holds basic mutable state info for an edge +type edgeState struct { + state 
edgeStatusType + result *SharedCachedResult + cacheMap *CacheMap + keys []ExportableCacheKey +} + +type edgeRequest struct { + desiredState edgeStatusType + currentState edgeState + currentKeys int +} + +// incrementReferenceCount increases the number of times release needs to be +// called to release the edge. Called on merging edges. +func (e *edge) incrementReferenceCount() { + e.releaserCount += 1 +} + +// release releases the edge resources +func (e *edge) release() { + if e.releaserCount > 0 { + e.releaserCount-- + return + } + e.index.Release(e) + if e.result != nil { + go e.result.Release(context.TODO()) + } +} + +// commitOptions returns parameters for the op execution +func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) { + k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + if len(e.deps) == 0 { + keys := make([]*CacheKey, 0, len(e.cacheMapDigests)) + for _, dgst := range e.cacheMapDigests { + keys = append(keys, NewCacheKey(dgst, e.edge.Index)) + } + return keys, nil + } + + inputs := make([][]CacheKeyWithSelector, len(e.deps)) + results := make([]CachedResult, len(e.deps)) + for i, dep := range e.deps { + inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: dep.result.CacheKey(), Selector: e.cacheMap.Deps[i].Selector}) + if dep.slowCacheKey != nil { + inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) + } + results[i] = dep.result + } + + k.deps = inputs + return []*CacheKey{k}, results +} + +// isComplete returns true if edge state is final and will never change +func (e *edge) isComplete() bool { + return e.err != nil || e.result != nil +} + +// finishIncoming finalizes the incoming pipe request +func (e *edge) finishIncoming(req pipe.Sender) { + err := e.err + if req.Request().Canceled && err == nil { + err = context.Canceled + } + if debugScheduler { + logrus.Debugf("finishIncoming %s %v %#v desired=%s", e.edge.Vertex.Name(), err, e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) + } + req.Finalize(&e.edgeState, err) +} + +// updateIncoming updates the current value of incoming pipe request +func (e *edge) updateIncoming(req pipe.Sender) { + req.Update(&e.edgeState) +} + +// probeCache is called with unprocessed cache keys for dependency +// if the key could match the edge, the cacheRecords for dependency are filled +func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool { + if len(depKeys) == 0 { + return false + } + if e.op.IgnoreCache() { + return false + } + keys, err := e.op.Cache().Query(depKeys, d.index, e.cacheMap.Digest, e.edge.Index) + if err != nil { + e.err = errors.Wrap(err, "error on cache query") + } + found := false + for _, k := range keys { + if _, ok := d.keyMap[k.ID]; !ok { + d.keyMap[k.ID] = k + found = true + } + } + return found +} + +// checkDepMatchPossible checks if any cache matches are possible past this point +func (e *edge) checkDepMatchPossible(dep *dep) { + depHasSlowCache := e.cacheMap.Deps[dep.index].ComputeDigestFunc != nil + if !e.noCacheMatchPossible && (((!dep.slowCacheFoundKey && dep.slowCacheComplete && depHasSlowCache) || (!depHasSlowCache && dep.state >= edgeStatusCacheSlow)) && len(dep.keys) == 0) { + e.noCacheMatchPossible = true + } +} + +// slowCacheFunc returns the result based cache func for dependency if it exists +func (e *edge) slowCacheFunc(dep *dep) ResultBasedCacheFunc { + if e.cacheMap == nil { + return nil + } + return e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc +} + +// allDepsHaveKeys checks if all dependencies have at least one key. 
used for +// determining if there is enough data for combining cache key for edge +func (e *edge) allDepsHaveKeys() bool { + if e.cacheMap == nil { + return false + } + for _, d := range e.deps { + if len(d.keys) == 0 && d.slowCacheKey == nil && d.result == nil { + return false + } + } + return true +} + +// depKeys returns all current dependency cache keys +func (e *edge) currentIndexKey() *CacheKey { + if e.cacheMap == nil { + return nil + } + + keys := make([][]CacheKeyWithSelector, len(e.deps)) + for i, d := range e.deps { + if len(d.keys) == 0 && d.result == nil { + return nil + } + for _, k := range d.keys { + keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) + } + if d.result != nil { + keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: d.result.CacheKey()}) + if d.slowCacheKey != nil { + keys[i] = append(keys[i], CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: d.slowCacheKey.CacheKey, Exporter: &exporter{k: d.slowCacheKey.CacheKey}}}) + } + } + } + + k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k.deps = keys + + return k +} + +// slow cache keys can be computed in 2 phases if there are multiple deps. +// first evaluate ones that didn't match any definition based keys +func (e *edge) skipPhase2SlowCache(dep *dep) bool { + isPhase1 := false + for _, dep := range e.deps { + if !dep.slowCacheComplete && e.slowCacheFunc(dep) != nil && len(dep.keyMap) == 0 { + isPhase1 = true + break + } + } + + if isPhase1 && !dep.slowCacheComplete && e.slowCacheFunc(dep) != nil && len(dep.keyMap) > 0 { + return true + } + return false +} + +func (e *edge) skipPhase2FastCache(dep *dep) bool { + isPhase1 := false + for _, dep := range e.deps { + if e.cacheMap == nil || len(dep.keyMap) == 0 && ((!dep.slowCacheComplete && e.slowCacheFunc(dep) != nil) || (dep.state < edgeStatusComplete && e.slowCacheFunc(dep) == nil)) { + isPhase1 = true + break + } + } + + if isPhase1 && len(dep.keyMap) > 0 { + return true + } + return false +} + +// unpark is called by the scheduler with incoming requests and updates for +// previous calls. +// To avoid deadlocks and resource leaks this function needs to follow +// following rules: +// 1) this function needs to return unclosed outgoing requests if some incoming +// requests were not completed +// 2) this function may not return outgoing requests if it has completed all +// incoming requests +func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, f *pipeFactory) { + // process all incoming changes + depChanged := false + for _, upt := range updates { + if changed := e.processUpdate(upt); changed { + depChanged = true + } + } + + if depChanged { + // the dep responses had changes. 
need to reevaluate edge state + e.recalcCurrentState() + } + + desiredState, done := e.respondToIncoming(incoming, allPipes) + if done { + return + } + + // set up new outgoing requests if needed + if e.cacheMapReq == nil && (e.cacheMap == nil || len(e.cacheRecords) == 0) { + index := e.cacheMapIndex + e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { + return e.op.CacheMap(ctx, index) + }) + } + + // execute op + if e.execReq == nil && desiredState == edgeStatusComplete { + if ok := e.execIfPossible(f); ok { + return + } + } + + if e.execReq == nil { + e.createInputRequests(desiredState, f) + } + +} + +func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCacheKey { + return ExportableCacheKey{ + CacheKey: k, + Exporter: &exporter{k: k, records: records, override: e.edge.Vertex.Options().ExportCache}, + } +} + +// processUpdate is called by unpark for every updated pipe request +func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { + // response for cachemap request + if upt == e.cacheMapReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { + e.cacheMapReq = nil + if !upt.Status().Canceled && e.err == nil { + e.err = err + } + } else { + resp := upt.Status().Value.(*cacheMapResp) + e.cacheMap = resp.CacheMap + e.cacheMapDone = resp.complete + e.cacheMapIndex++ + if len(e.deps) == 0 { + e.cacheMapDigests = append(e.cacheMapDigests, e.cacheMap.Digest) + if !e.op.IgnoreCache() { + keys, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index) + if err != nil { + logrus.Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error + } else { + for _, k := range keys { + records, err := e.op.Cache().Records(k) + if err != nil { + logrus.Errorf("error receiving cache records: %v", err) + continue + } + + for _, r := range records { + e.cacheRecords[r.ID] = r + } + + e.keys = append(e.keys, e.makeExportable(k, records)) + } + } + } + e.state = edgeStatusCacheSlow + } + if e.allDepsHaveKeys() { + e.keysDidChange = true + } + // probe keys that were loaded before cache map + for i, dep := range e.deps { + e.probeCache(dep, withSelector(dep.keys, e.cacheMap.Deps[i].Selector)) + e.checkDepMatchPossible(dep) + } + if !e.cacheMapDone { + e.cacheMapReq = nil + } + } + return true + } + + // response for exec request + if upt == e.execReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { + e.execReq = nil + if !upt.Status().Canceled && e.err == nil { + e.err = err + } + } else { + e.result = NewSharedCachedResult(upt.Status().Value.(CachedResult)) + e.state = edgeStatusComplete + } + return true + } + + // response for requests to dependencies + if dep, ok := e.depRequests[upt]; ok { // TODO: ignore canceled + if err := upt.Status().Err; !upt.Status().Canceled && upt.Status().Completed && err != nil { + if e.err == nil { + e.err = err + } + dep.err = err + } + + state := upt.Status().Value.(*edgeState) + + if len(dep.keys) < len(state.keys) { + newKeys := state.keys[len(dep.keys):] + if e.cacheMap != nil { + e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector)) + if e.allDepsHaveKeys() { + e.keysDidChange = true + } + } + depChanged = true + } + if dep.state != edgeStatusComplete && state.state == edgeStatusComplete { + e.keysDidChange = true + } + + recheck := state.state != dep.state + + dep.edgeState = *state + + if recheck && e.cacheMap != nil { + e.checkDepMatchPossible(dep) + depChanged = true + } + + return + } + + // response 
for result based cache function + for i, dep := range e.deps { + if upt == dep.slowCacheReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { + dep.slowCacheReq = nil + if !upt.Status().Canceled && e.err == nil { + e.err = upt.Status().Err + } + } else if !dep.slowCacheComplete { + k := NewCacheKey(upt.Status().Value.(digest.Digest), -1) + dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} + slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} + defKeyExp := CacheKeyWithSelector{CacheKey: dep.result.CacheKey(), Selector: e.cacheMap.Deps[i].Selector} + dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp}) + + // connect def key to slow key + e.op.Cache().Query([]CacheKeyWithSelector{defKeyExp, slowKeyExp}, dep.index, e.cacheMap.Digest, e.edge.Index) + + dep.slowCacheComplete = true + e.keysDidChange = true + e.checkDepMatchPossible(dep) // not matching key here doesn't set nocachematch possible to true + } + return true + } + } + + return +} + +// recalcCurrentState is called by unpark to recompute internal state after +// the state of dependencies has changed +func (e *edge) recalcCurrentState() { + // TODO: fast pass to detect incomplete results + newKeys := map[string]*CacheKey{} + + for i, dep := range e.deps { + if i == 0 { + for id, k := range dep.keyMap { + if _, ok := e.keyMap[id]; ok { + continue + } + newKeys[id] = k + } + } else { + for id := range newKeys { + if _, ok := dep.keyMap[id]; !ok { + delete(newKeys, id) + } + } + } + if len(newKeys) == 0 { + break + } + } + + for _, r := range newKeys { + // TODO: add all deps automatically + mergedKey := r.clone() + mergedKey.deps = make([][]CacheKeyWithSelector, len(e.deps)) + for i, dep := range e.deps { + if dep.result != nil { + mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: dep.result.CacheKey()}) + if dep.slowCacheKey != nil { + mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) + } + } else { + for _, k := range dep.keys { + mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) + } + } + } + + records, err := e.op.Cache().Records(mergedKey) + if err != nil { + logrus.Errorf("error receiving cache records: %v", err) + continue + } + + for _, r := range records { + e.cacheRecords[r.ID] = r + } + + e.keys = append(e.keys, e.makeExportable(mergedKey, records)) + } + + // detect lower/upper bound for current state + allDepsCompletedCacheFast := e.cacheMap != nil + allDepsCompletedCacheSlow := e.cacheMap != nil + allDepsStateCacheSlow := true + allDepsCompleted := true + stLow := edgeStatusInitial // minimal possible state + stHigh := edgeStatusCacheSlow // maximum possible state + if e.cacheMap != nil { + for _, dep := range e.deps { + isSlowIncomplete := e.slowCacheFunc(dep) != nil && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete)) + + if dep.state > stLow && len(dep.keyMap) == 0 && !isSlowIncomplete { + stLow = dep.state + if stLow > edgeStatusCacheSlow { + stLow = edgeStatusCacheSlow + } + } + effectiveState := dep.state + if dep.state == edgeStatusCacheSlow && isSlowIncomplete { + effectiveState = edgeStatusCacheFast + } + if dep.state == edgeStatusComplete && isSlowIncomplete { + effectiveState = edgeStatusCacheFast + } + if effectiveState < stHigh { + stHigh = effectiveState + } + if isSlowIncomplete || dep.state < 
edgeStatusComplete { + allDepsCompleted = false + } + if dep.state < edgeStatusCacheFast { + allDepsCompletedCacheFast = false + } + if isSlowIncomplete || dep.state < edgeStatusCacheSlow { + allDepsCompletedCacheSlow = false + } + if dep.state < edgeStatusCacheSlow && len(dep.keys) == 0 { + allDepsStateCacheSlow = false + } + } + if stLow > e.state { + e.state = stLow + } + if stHigh > e.state { + e.state = stHigh + } + if !e.cacheMapDone && len(e.keys) == 0 { + e.state = edgeStatusInitial + } + + e.allDepsCompletedCacheFast = e.cacheMapDone && allDepsCompletedCacheFast + e.allDepsCompletedCacheSlow = e.cacheMapDone && allDepsCompletedCacheSlow + e.allDepsStateCacheSlow = e.cacheMapDone && allDepsStateCacheSlow + e.allDepsCompleted = e.cacheMapDone && allDepsCompleted + } +} + +// respondToIncoming responds to all incoming requests. completing or +// updating them when possible +func (e *edge) respondToIncoming(incoming []pipe.Sender, allPipes []pipe.Receiver) (edgeStatusType, bool) { + // detect the result state for the requests + allIncomingCanComplete := true + desiredState := e.state + allCanceled := true + + // check incoming requests + // check if all requests can be either answered or canceled + if !e.isComplete() { + for _, req := range incoming { + if !req.Request().Canceled { + allCanceled = false + if r := req.Request().Payload.(*edgeRequest); desiredState < r.desiredState { + desiredState = r.desiredState + if e.hasActiveOutgoing || r.desiredState == edgeStatusComplete || r.currentKeys == len(e.keys) { + allIncomingCanComplete = false + } + } + } + } + } + + // do not set allIncomingCanComplete if active ongoing can modify the state + if !allCanceled && e.state < edgeStatusComplete && len(e.keys) == 0 && e.hasActiveOutgoing { + allIncomingCanComplete = false + } + + if debugScheduler { + logrus.Debugf("status state=%s cancomplete=%v hasouts=%v noPossibleCache=%v depsCacheFast=%v keys=%d cacheRecords=%d", e.state, allIncomingCanComplete, e.hasActiveOutgoing, e.noCacheMatchPossible, e.allDepsCompletedCacheFast, len(e.keys), len(e.cacheRecords)) + } + + if allIncomingCanComplete && e.hasActiveOutgoing { + // cancel all current requests + for _, p := range allPipes { + p.Cancel() + } + + // can close all but one requests + var leaveOpen pipe.Sender + for _, req := range incoming { + if !req.Request().Canceled { + leaveOpen = req + break + } + } + for _, req := range incoming { + if leaveOpen == nil || leaveOpen == req { + leaveOpen = req + continue + } + e.finishIncoming(req) + } + return desiredState, true + } + + // can complete, finish and return + if allIncomingCanComplete && !e.hasActiveOutgoing { + for _, req := range incoming { + e.finishIncoming(req) + } + return desiredState, true + } + + // update incoming based on current state + for _, req := range incoming { + r := req.Request().Payload.(*edgeRequest) + if req.Request().Canceled { + e.finishIncoming(req) + } else if !e.hasActiveOutgoing && e.state >= r.desiredState { + e.finishIncoming(req) + } else if !isEqualState(r.currentState, e.edgeState) && !req.Request().Canceled { + e.updateIncoming(req) + } + } + return desiredState, false +} + +// createInputRequests creates new requests for dependencies or async functions +// that need to complete to continue processing the edge +func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory) { + // initialize deps state + if e.deps == nil { + e.depRequests = make(map[pipe.Receiver]*dep) + e.deps = make([]*dep, 0, len(e.edge.Vertex.Inputs())) + for i := 
range e.edge.Vertex.Inputs() { + e.deps = append(e.deps, newDep(Index(i))) + } + } + + // cycle all dependencies. set up outgoing requests if needed + for _, dep := range e.deps { + desiredStateDep := dep.state + + if e.noCacheMatchPossible { + desiredStateDep = edgeStatusComplete + } else if dep.state == edgeStatusInitial && desiredState > dep.state { + desiredStateDep = edgeStatusCacheFast + } else if dep.state == edgeStatusCacheFast && desiredState > dep.state { + // wait all deps to complete cache fast before continuing with slow cache + if (e.allDepsCompletedCacheFast && len(e.keys) == 0) || len(dep.keys) == 0 || e.allDepsHaveKeys() { + if !e.skipPhase2FastCache(dep) { + desiredStateDep = edgeStatusCacheSlow + } + } + } else if dep.state == edgeStatusCacheSlow && desiredState == edgeStatusComplete { + // if all deps have completed cache-slow or content based cache for input is available + if (len(dep.keys) == 0 || e.allDepsCompletedCacheSlow || (!e.skipPhase2FastCache(dep) && e.slowCacheFunc(dep) != nil)) && (len(e.cacheRecords) == 0) { + if len(dep.keys) == 0 || !e.skipPhase2SlowCache(dep) && e.allDepsStateCacheSlow { + desiredStateDep = edgeStatusComplete + } + } + } else if dep.state == edgeStatusCacheSlow && e.slowCacheFunc(dep) != nil && desiredState == edgeStatusCacheSlow { + if len(dep.keys) == 0 || !e.skipPhase2SlowCache(dep) && e.allDepsStateCacheSlow { + desiredStateDep = edgeStatusComplete + } + } + + // outgoing request is needed + if dep.state < desiredStateDep { + addNew := true + if dep.req != nil && !dep.req.Status().Completed { + if dep.req.Request().(*edgeRequest).desiredState != desiredStateDep { + dep.req.Cancel() + } else { + addNew = false + } + } + if addNew { + req := f.NewInputRequest(e.edge.Vertex.Inputs()[int(dep.index)], &edgeRequest{ + currentState: dep.edgeState, + desiredState: desiredStateDep, + currentKeys: len(dep.keys), + }) + e.depRequests[req] = dep + dep.req = req + } + } + // initialize function to compute cache key based on dependency result + if dep.state == edgeStatusComplete && dep.slowCacheReq == nil && e.slowCacheFunc(dep) != nil && e.cacheMap != nil { + fn := e.slowCacheFunc(dep) + res := dep.result + func(fn ResultBasedCacheFunc, res Result, index Index) { + dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { + return e.op.CalcSlowCache(ctx, index, fn, res) + }) + }(fn, res, dep.index) + } + } +} + +// execIfPossible creates a request for getting the edge result if there is +// enough state +func (e *edge) execIfPossible(f *pipeFactory) bool { + if len(e.cacheRecords) > 0 { + if e.keysDidChange { + e.postpone(f) + return true + } + e.execReq = f.NewFuncRequest(e.loadCache) + for req := range e.depRequests { + req.Cancel() + } + return true + } else if e.allDepsCompleted { + if e.keysDidChange { + e.postpone(f) + return true + } + e.execReq = f.NewFuncRequest(e.execOp) + return true + } + return false +} + +// postpone delays exec to next unpark invocation if we have unprocessed keys +func (e *edge) postpone(f *pipeFactory) { + f.NewFuncRequest(func(context.Context) (interface{}, error) { + return nil, nil + }) +} + +// loadCache creates a request to load edge result from cache +func (e *edge) loadCache(ctx context.Context) (interface{}, error) { + recs := make([]*CacheRecord, 0, len(e.cacheRecords)) + for _, r := range e.cacheRecords { + recs = append(recs, r) + } + + rec := getBestResult(recs) + + logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) + res, err := e.op.LoadCache(ctx, 
rec) + if err != nil { + return nil, err + } + + return NewCachedResult(res, ExportableCacheKey{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}), nil +} + +// execOp creates a request to execute the vertex operation +func (e *edge) execOp(ctx context.Context) (interface{}, error) { + cacheKeys, inputs := e.commitOptions() + results, subExporters, err := e.op.Exec(ctx, toResultSlice(inputs)) + if err != nil { + return nil, err + } + + index := e.edge.Index + if len(results) <= int(index) { + return nil, errors.Errorf("invalid response from exec need %d index but %d results received", index, len(results)) + } + + res := results[int(index)] + + for i := range results { + if i != int(index) { + go results[i].Release(context.TODO()) + } + } + + var exporters []CacheExporter + + for _, cacheKey := range cacheKeys { + ck, err := e.op.Cache().Save(cacheKey, res) + if err != nil { + return nil, err + } + + if exp, ok := ck.Exporter.(*exporter); ok { + exp.edge = e + } + + exps := make([]CacheExporter, 0, len(subExporters)) + for _, exp := range subExporters { + exps = append(exps, exp.Exporter) + } + + exporters = append(exporters, ck.Exporter) + exporters = append(exporters, exps...) + } + + ck := &ExportableCacheKey{ + CacheKey: cacheKeys[0], + Exporter: &mergedExporter{exporters: exporters}, + } + + return NewCachedResult(res, *ck), nil +} + +func toResultSlice(cres []CachedResult) (out []Result) { + out = make([]Result, len(cres)) + for i := range cres { + out[i] = cres[i].(Result) + } + return out +} + +func isEqualState(s1, s2 edgeState) bool { + if s1.state != s2.state || s1.result != s2.result || s1.cacheMap != s2.cacheMap || len(s1.keys) != len(s2.keys) { + return false + } + return true +} + +func withSelector(keys []ExportableCacheKey, selector digest.Digest) []CacheKeyWithSelector { + out := make([]CacheKeyWithSelector, len(keys)) + for i, k := range keys { + out[i] = CacheKeyWithSelector{Selector: selector, CacheKey: k} + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go new file mode 100644 index 0000000000..fa963f414a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/exporter.go @@ -0,0 +1,208 @@ +package solver + +import ( + "context" + + digest "github.com/opencontainers/go-digest" +) + +type exporter struct { + k *CacheKey + records []*CacheRecord + record *CacheRecord + + res []CacheExporterRecord + edge *edge // for secondaryExporters + override *bool +} + +func addBacklinks(t CacheExporterTarget, rec CacheExporterRecord, cm *cacheManager, id string, bkm map[string]CacheExporterRecord) (CacheExporterRecord, error) { + if rec == nil { + var ok bool + rec, ok = bkm[id] + if ok { + return rec, nil + } + _ = ok + } + if err := cm.backend.WalkBacklinks(id, func(id string, link CacheInfoLink) error { + if rec == nil { + rec = t.Add(link.Digest) + } + r, ok := bkm[id] + if !ok { + var err error + r, err = addBacklinks(t, nil, cm, id, bkm) + if err != nil { + return err + } + } + rec.LinkFrom(r, int(link.Input), link.Selector.String()) + return nil + }); err != nil { + return nil, err + } + if rec == nil { + rec = t.Add(digest.Digest(id)) + } + bkm[id] = rec + return rec, nil +} + +type backlinkT struct{} + +var backlinkKey = backlinkT{} + +func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) { + var bkm map[string]CacheExporterRecord + + if bk := ctx.Value(backlinkKey); bk == nil { + bkm = 
map[string]CacheExporterRecord{} + ctx = context.WithValue(ctx, backlinkKey, bkm) + } else { + bkm = bk.(map[string]CacheExporterRecord) + } + + if t.Visited(e) { + return e.res, nil + } + + deps := e.k.Deps() + + type expr struct { + r CacheExporterRecord + selector digest.Digest + } + + rec := t.Add(rootKey(e.k.Digest(), e.k.Output())) + allRec := []CacheExporterRecord{rec} + + addRecord := true + + if e.override != nil { + addRecord = *e.override + } + + if e.record == nil && len(e.k.Deps()) > 0 { + e.record = getBestResult(e.records) + } + + var remote *Remote + if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { + cm := v.cacheManager + key := cm.getID(v.key) + res, err := cm.backend.Load(key, v.ID) + if err != nil { + return nil, err + } + + remote, err = cm.results.LoadRemote(ctx, res) + if err != nil { + return nil, err + } + + if remote == nil && opt.Mode != CacheExportModeRemoteOnly { + res, err := cm.results.Load(ctx, res) + if err != nil { + return nil, err + } + remote, err = opt.Convert(ctx, res) + if err != nil { + return nil, err + } + res.Release(context.TODO()) + } + + if remote != nil { + for _, rec := range allRec { + rec.AddResult(v.CreatedAt, remote) + } + } + } + + if remote != nil && opt.Mode == CacheExportModeMin { + opt.Mode = CacheExportModeRemoteOnly + } + + srcs := make([][]expr, len(deps)) + + for i, deps := range deps { + for _, dep := range deps { + recs, err := dep.CacheKey.Exporter.ExportTo(ctx, t, opt) + if err != nil { + return nil, err + } + for _, r := range recs { + srcs[i] = append(srcs[i], expr{r: r, selector: dep.Selector}) + } + } + } + + if e.edge != nil { + for _, de := range e.edge.secondaryExporters { + recs, err := de.cacheKey.CacheKey.Exporter.ExportTo(ctx, t, opt) + if err != nil { + return nil, err + } + for _, r := range recs { + srcs[de.index] = append(srcs[de.index], expr{r: r, selector: de.cacheKey.Selector}) + } + } + } + + for i, srcs := range srcs { + for _, src := range srcs { + rec.LinkFrom(src.r, i, src.selector.String()) + } + } + + for cm, id := range e.k.ids { + if _, err := addBacklinks(t, rec, cm, id, bkm); err != nil { + return nil, err + } + } + + if v := e.record; v != nil && len(deps) == 0 { + cm := v.cacheManager + key := cm.getID(v.key) + if err := cm.backend.WalkIDsByResult(v.ID, func(id string) error { + if id == key { + return nil + } + allRec = append(allRec, t.Add(digest.Digest(id))) + return nil + }); err != nil { + return nil, err + } + } + + e.res = allRec + t.Visit(e) + + return e.res, nil +} + +func getBestResult(records []*CacheRecord) *CacheRecord { + var rec *CacheRecord + for _, r := range records { + if rec == nil || rec.CreatedAt.Before(r.CreatedAt) || (rec.CreatedAt.Equal(r.CreatedAt) && rec.Priority < r.Priority) { + rec = r + } + } + return rec +} + +type mergedExporter struct { + exporters []CacheExporter +} + +func (e *mergedExporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) (er []CacheExporterRecord, err error) { + for _, e := range e.exporters { + r, err := e.ExportTo(ctx, t, opt) + if err != nil { + return nil, err + } + er = append(er, r...) + } + return +} diff --git a/vendor/github.com/moby/buildkit/solver/index.go b/vendor/github.com/moby/buildkit/solver/index.go new file mode 100644 index 0000000000..1bb17d8944 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/index.go @@ -0,0 +1,243 @@ +package solver + +import ( + "sync" + + "github.com/moby/buildkit/identity" +) + +// edgeIndex is a synchronous map for detecting edge collisions. 
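When two vertexes in the graph compute an identical cache key, only one edge should keep running and the others should be merged into it; the index is what detects that collision. A rough sketch of the contract LoadOrStore gives its caller (the real call site is in the scheduler, outside this hunk, and mergeInto is an illustrative stand-in rather than an API in this patch):

    // k is the cache key that edge e has just computed for itself.
    // LoadOrStore returns nil if e is now the canonical edge for k, or the
    // previously registered edge that e should be merged into.
    if target := index.LoadOrStore(k, e); target != nil {
        target.incrementReferenceCount() // one more release() is now required
        mergeInto(target, e)             // hypothetical scheduler-side merge step
    }

After a merge, release() on the surviving edge only decrements this counter until the last owner calls it, at which point the index entries and any shared result are freed.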
+type edgeIndex struct { + mu sync.Mutex + + items map[string]*indexItem + backRefs map[*edge]map[string]struct{} +} + +type indexItem struct { + edge *edge + links map[CacheInfoLink]map[string]struct{} + deps map[string]struct{} +} + +func newEdgeIndex() *edgeIndex { + return &edgeIndex{ + items: map[string]*indexItem{}, + backRefs: map[*edge]map[string]struct{}{}, + } +} + +func (ei *edgeIndex) Release(e *edge) { + ei.mu.Lock() + defer ei.mu.Unlock() + + for id := range ei.backRefs[e] { + ei.releaseEdge(id, e) + } + delete(ei.backRefs, e) +} + +func (ei *edgeIndex) releaseEdge(id string, e *edge) { + item, ok := ei.items[id] + if !ok { + return + } + + item.edge = nil + + if len(item.links) == 0 { + for d := range item.deps { + ei.releaseLink(d, id) + } + delete(ei.items, id) + } +} + +func (ei *edgeIndex) releaseLink(id, target string) { + item, ok := ei.items[id] + if !ok { + return + } + + for lid, links := range item.links { + for check := range links { + if check == target { + delete(links, check) + } + } + if len(links) == 0 { + delete(item.links, lid) + } + } + + if item.edge == nil && len(item.links) == 0 { + for d := range item.deps { + ei.releaseLink(d, id) + } + delete(ei.items, id) + } +} + +func (ei *edgeIndex) LoadOrStore(k *CacheKey, e *edge) *edge { + ei.mu.Lock() + defer ei.mu.Unlock() + + // get all current edges that match the cachekey + ids := ei.getAllMatches(k) + + var oldID string + var old *edge + + for _, id := range ids { + if item, ok := ei.items[id]; ok { + if item.edge != e { + oldID = id + old = item.edge + } + } + } + + if old != nil && !(!isIgnoreCache(old) && isIgnoreCache(e)) { + ei.enforceLinked(oldID, k) + return old + } + + id := identity.NewID() + if len(ids) > 0 { + id = ids[0] + } + + ei.enforceLinked(id, k) + + ei.items[id].edge = e + backRefs, ok := ei.backRefs[e] + if !ok { + backRefs = map[string]struct{}{} + ei.backRefs[e] = backRefs + } + backRefs[id] = struct{}{} + + return nil +} + +// enforceLinked adds links from current ID to all dep keys +func (er *edgeIndex) enforceLinked(id string, k *CacheKey) { + main, ok := er.items[id] + if !ok { + main = &indexItem{ + links: map[CacheInfoLink]map[string]struct{}{}, + deps: map[string]struct{}{}, + } + er.items[id] = main + } + + deps := k.Deps() + + for i, dd := range deps { + for _, d := range dd { + ck := d.CacheKey.CacheKey + er.enforceIndexID(ck) + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range ck.indexIDs { + if item, ok := er.items[ckID]; ok { + links, ok := item.links[ll] + if !ok { + links = map[string]struct{}{} + item.links[ll] = links + } + links[id] = struct{}{} + main.deps[ckID] = struct{}{} + } + } + } + } +} + +func (ei *edgeIndex) enforceIndexID(k *CacheKey) { + if len(k.indexIDs) > 0 { + return + } + + matches := ei.getAllMatches(k) + + if len(matches) > 0 { + k.indexIDs = matches + } else { + k.indexIDs = []string{identity.NewID()} + } + + for _, id := range k.indexIDs { + ei.enforceLinked(id, k) + } +} + +func (ei *edgeIndex) getAllMatches(k *CacheKey) []string { + deps := k.Deps() + + if len(deps) == 0 { + return []string{rootKey(k.Digest(), k.Output()).String()} + } + + for _, dd := range deps { + for _, k := range dd { + ei.enforceIndexID(k.CacheKey.CacheKey) + } + } + + matches := map[string]struct{}{} + + for i, dd := range deps { + if i == 0 { + for _, d := range dd { + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range 
d.CacheKey.CacheKey.indexIDs { + item, ok := ei.items[ckID] + if ok { + for l := range item.links[ll] { + matches[l] = struct{}{} + } + } + } + } + continue + } + + if len(matches) == 0 { + break + } + + for m := range matches { + found := false + for _, d := range dd { + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range d.CacheKey.CacheKey.indexIDs { + if l, ok := ei.items[ckID].links[ll]; ok { + if _, ok := l[m]; ok { + found = true + break + } + } + } + } + + if !found { + delete(matches, m) + } + } + } + + out := make([]string, 0, len(matches)) + + for m := range matches { + out = append(out, m) + } + + return out +} + +func isIgnoreCache(e *edge) bool { + if e.edge.Vertex == nil { + return false + } + return e.edge.Vertex.Options().IgnoreCache +} diff --git a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go new file mode 100644 index 0000000000..e61a6b3465 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go @@ -0,0 +1,197 @@ +package pipe + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/pkg/errors" +) + +type channel struct { + OnSendCompletion func() + value atomic.Value + lastValue interface{} +} + +func (c *channel) Send(v interface{}) { + c.value.Store(v) + if c.OnSendCompletion != nil { + c.OnSendCompletion() + } +} + +func (c *channel) Receive() (interface{}, bool) { + v := c.value.Load() + if c.lastValue == v { + return nil, false + } + c.lastValue = v + return v, true +} + +type Pipe struct { + Sender Sender + Receiver Receiver + OnReceiveCompletion func() + OnSendCompletion func() +} + +type Request struct { + Payload interface{} + Canceled bool +} + +type Sender interface { + Request() Request + Update(v interface{}) + Finalize(v interface{}, err error) + Status() Status +} + +type Receiver interface { + Receive() bool + Cancel() + Status() Status + Request() interface{} +} + +type Status struct { + Canceled bool + Completed bool + Err error + Value interface{} +} + +func NewWithFunction(f func(context.Context) (interface{}, error)) (*Pipe, func()) { + p := New(Request{}) + + ctx, cancel := context.WithCancel(context.TODO()) + + p.OnReceiveCompletion = func() { + if req := p.Sender.Request(); req.Canceled { + cancel() + } + } + + return p, func() { + res, err := f(ctx) + if err != nil { + p.Sender.Finalize(nil, err) + return + } + p.Sender.Finalize(res, nil) + } +} + +func New(req Request) *Pipe { + cancelCh := &channel{} + roundTripCh := &channel{} + pw := &sender{ + req: req, + recvChannel: cancelCh, + sendChannel: roundTripCh, + } + pr := &receiver{ + req: req, + recvChannel: roundTripCh, + sendChannel: cancelCh, + } + + p := &Pipe{ + Sender: pw, + Receiver: pr, + } + + cancelCh.OnSendCompletion = func() { + v, ok := cancelCh.Receive() + if ok { + pw.setRequest(v.(Request)) + } + if p.OnReceiveCompletion != nil { + p.OnReceiveCompletion() + } + } + + roundTripCh.OnSendCompletion = func() { + if p.OnSendCompletion != nil { + p.OnSendCompletion() + } + } + + return p +} + +type sender struct { + status Status + req Request + recvChannel *channel + sendChannel *channel + mu sync.Mutex +} + +func (pw *sender) Status() Status { + return pw.status +} + +func (pw *sender) Request() Request { + pw.mu.Lock() + defer pw.mu.Unlock() + return pw.req +} + +func (pw *sender) setRequest(req Request) { + pw.mu.Lock() + defer pw.mu.Unlock() + pw.req = req +} + +func (pw *sender) Update(v interface{}) 
{ + pw.status.Value = v + pw.sendChannel.Send(pw.status) +} + +func (pw *sender) Finalize(v interface{}, err error) { + if v != nil { + pw.status.Value = v + } + pw.status.Err = err + pw.status.Completed = true + if errors.Cause(err) == context.Canceled && pw.req.Canceled { + pw.status.Canceled = true + } + pw.sendChannel.Send(pw.status) +} + +type receiver struct { + status Status + req Request + recvChannel *channel + sendChannel *channel +} + +func (pr *receiver) Request() interface{} { + return pr.req.Payload +} + +func (pr *receiver) Receive() bool { + v, ok := pr.recvChannel.Receive() + if !ok { + return false + } + pr.status = v.(Status) + return true +} + +func (pr *receiver) Cancel() { + req := pr.req + if req.Canceled { + return + } + req.Canceled = true + pr.sendChannel.Send(req) +} + +func (pr *receiver) Status() Status { + return pr.status +} diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go new file mode 100644 index 0000000000..bce6c8b875 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -0,0 +1,774 @@ +package solver + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ResolveOpFunc finds an Op implementation for a Vertex +type ResolveOpFunc func(Vertex, Builder) (Op, error) + +type Builder interface { + Build(ctx context.Context, e Edge) (CachedResult, error) + Call(ctx context.Context, name string, fn func(ctx context.Context) error) error +} + +// Solver provides a shared graph of all the vertexes currently being + // processed. Every vertex that is being solved needs to be loaded into a job + // first. Vertex operations are invoked and progress tracking happens through + // jobs. 
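Concretely, the lifecycle described above looks like this from the caller's side (a minimal sketch, assuming an already configured *Solver s and an Edge e; error handling abbreviated):

    j, err := s.NewJob(identity.NewID()) // job IDs must be unique
    if err != nil {
        return err
    }
    defer j.Discard() // drops this job's vertex refs; unreferenced state is deleted
    res, err := j.Build(ctx, e) // loads e.Vertex and its inputs into the job, then solves
    if err != nil {
        return err
    }
    defer res.Release(context.TODO())

Build connects the job's progress writer to every vertex it loads before the scheduler starts evaluating, which is why a vertex must go through a job rather than being solved directly.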
+type Solver struct { + mu sync.RWMutex + jobs map[string]*Job + actives map[digest.Digest]*state + opts SolverOpt + + updateCond *sync.Cond + s *scheduler + index *edgeIndex +} + +type state struct { + jobs map[*Job]struct{} + parents map[digest.Digest]struct{} + childVtx map[digest.Digest]struct{} + + mpw *progress.MultiWriter + allPw map[progress.Writer]struct{} + + vtx Vertex + clientVertex client.Vertex + + mu sync.Mutex + op *sharedOp + edges map[Index]*edge + opts SolverOpt + index *edgeIndex + + cache map[string]CacheManager + mainCache CacheManager + solver *Solver +} + +func (s *state) getSessionID() string { + // TODO: connect with sessionmanager to avoid getting dropped sessions + s.mu.Lock() + for j := range s.jobs { + if j.SessionID != "" { + s.mu.Unlock() + return j.SessionID + } + } + parents := map[digest.Digest]struct{}{} + for p := range s.parents { + parents[p] = struct{}{} + } + s.mu.Unlock() + + for p := range parents { + s.solver.mu.Lock() + pst, ok := s.solver.actives[p] + s.solver.mu.Unlock() + if ok { + if sessionID := pst.getSessionID(); sessionID != "" { + return sessionID + } + } + } + return "" +} + +func (s *state) builder() *subBuilder { + return &subBuilder{state: s} +} + +func (s *state) getEdge(index Index) *edge { + s.mu.Lock() + defer s.mu.Unlock() + if e, ok := s.edges[index]; ok { + return e + } + + if s.op == nil { + s.op = newSharedOp(s.opts.ResolveOpFunc, s.opts.DefaultCache, s) + } + + e := newEdge(Edge{Index: index, Vertex: s.vtx}, s.op, s.index) + s.edges[index] = e + return e +} + +func (s *state) setEdge(index Index, newEdge *edge) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.edges[index] + if ok { + if e == newEdge { + return + } + e.release() + } + + newEdge.incrementReferenceCount() + s.edges[index] = newEdge +} + +func (s *state) combinedCacheManager() CacheManager { + s.mu.Lock() + cms := make([]CacheManager, 0, len(s.cache)+1) + cms = append(cms, s.mainCache) + for _, cm := range s.cache { + cms = append(cms, cm) + } + s.mu.Unlock() + + if len(cms) == 1 { + return s.mainCache + } + + return newCombinedCacheManager(cms, s.mainCache) +} + +func (s *state) Release() { + for _, e := range s.edges { + e.release() + } + if s.op != nil { + s.op.release() + } +} + +type subBuilder struct { + *state + mu sync.Mutex + exporters []ExportableCacheKey +} + +func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) { + res, err := sb.solver.subBuild(ctx, e, sb.vtx) + if err != nil { + return nil, err + } + sb.mu.Lock() + sb.exporters = append(sb.exporters, res.CacheKey()) + sb.mu.Unlock() + return res, nil +} + +func (sb *subBuilder) Call(ctx context.Context, name string, fn func(ctx context.Context) error) error { + ctx = progress.WithProgress(ctx, sb.mpw) + return inVertexContext(ctx, name, fn) +} + +type Job struct { + list *Solver + pr *progress.MultiReader + pw progress.Writer + + progressCloser func() + SessionID string +} + +type SolverOpt struct { + ResolveOpFunc ResolveOpFunc + DefaultCache CacheManager +} + +func NewSolver(opts SolverOpt) *Solver { + if opts.DefaultCache == nil { + opts.DefaultCache = NewInMemoryCacheManager() + } + jl := &Solver{ + jobs: make(map[string]*Job), + actives: make(map[digest.Digest]*state), + opts: opts, + index: newEdgeIndex(), + } + jl.s = newScheduler(jl) + jl.updateCond = sync.NewCond(jl.mu.RLocker()) + return jl +} + +func (jl *Solver) setEdge(e Edge, newEdge *edge) { + jl.mu.RLock() + defer jl.mu.RUnlock() + + st, ok := jl.actives[e.Vertex.Digest()] + if !ok { + return + } + + 
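+ // hand this index slot over to the merged edge: state.setEdge releases the + // edge previously stored there and bumps newEdge's releaser count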
st.setEdge(e.Index, newEdge) +} + +func (jl *Solver) getEdge(e Edge) *edge { + jl.mu.RLock() + defer jl.mu.RUnlock() + + st, ok := jl.actives[e.Vertex.Digest()] + if !ok { + return nil + } + return st.getEdge(e.Index) +} + +func (jl *Solver) subBuild(ctx context.Context, e Edge, parent Vertex) (CachedResult, error) { + v, err := jl.load(e.Vertex, parent, nil) + if err != nil { + return nil, err + } + e.Vertex = v + return jl.s.build(ctx, e) +} + +func (jl *Solver) Close() { + jl.s.Stop() +} + +func (jl *Solver) load(v, parent Vertex, j *Job) (Vertex, error) { + jl.mu.Lock() + defer jl.mu.Unlock() + + cache := map[Vertex]Vertex{} + + return jl.loadUnlocked(v, parent, j, cache) +} + +func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex) (Vertex, error) { + if v, ok := cache[v]; ok { + return v, nil + } + origVtx := v + + inputs := make([]Edge, len(v.Inputs())) + for i, e := range v.Inputs() { + v, err := jl.loadUnlocked(e.Vertex, parent, j, cache) + if err != nil { + return nil, err + } + inputs[i] = Edge{Index: e.Index, Vertex: v} + } + + dgst := v.Digest() + + dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst))) + + // if same vertex is already loaded without cache just use that + st, ok := jl.actives[dgstWithoutCache] + + if !ok { + st, ok = jl.actives[dgst] + + // !ignorecache merges with ignorecache but ignorecache doesn't merge with !ignorecache + if ok && !st.vtx.Options().IgnoreCache && v.Options().IgnoreCache { + dgst = dgstWithoutCache + } + + v = &vertexWithCacheOptions{ + Vertex: v, + dgst: dgst, + inputs: inputs, + } + + st, ok = jl.actives[dgst] + } + + if !ok { + st = &state{ + opts: jl.opts, + jobs: map[*Job]struct{}{}, + parents: map[digest.Digest]struct{}{}, + childVtx: map[digest.Digest]struct{}{}, + allPw: map[progress.Writer]struct{}{}, + mpw: progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)), + vtx: v, + clientVertex: initClientVertex(v), + edges: map[Index]*edge{}, + index: jl.index, + mainCache: jl.opts.DefaultCache, + cache: map[string]CacheManager{}, + solver: jl, + } + jl.actives[dgst] = st + } + + st.mu.Lock() + for _, cache := range v.Options().CacheSources { + if cache.ID() != st.mainCache.ID() { + if _, ok := st.cache[cache.ID()]; !ok { + st.cache[cache.ID()] = cache + } + } + } + + if j != nil { + if _, ok := st.jobs[j]; !ok { + st.jobs[j] = struct{}{} + } + } + st.mu.Unlock() + + if parent != nil { + if _, ok := st.parents[parent.Digest()]; !ok { + st.parents[parent.Digest()] = struct{}{} + parentState, ok := jl.actives[parent.Digest()] + if !ok { + return nil, errors.Errorf("inactive parent %s", parent.Digest()) + } + parentState.childVtx[dgst] = struct{}{} + + for id, c := range parentState.cache { + st.cache[id] = c + } + } + } + + jl.connectProgressFromState(st, st) + cache[origVtx] = v + return v, nil +} + +func (jl *Solver) connectProgressFromState(target, src *state) { + for j := range src.jobs { + if _, ok := target.allPw[j.pw]; !ok { + target.mpw.Add(j.pw) + target.allPw[j.pw] = struct{}{} + j.pw.Write(target.clientVertex.Digest.String(), target.clientVertex) + } + } + for p := range src.parents { + jl.connectProgressFromState(target, jl.actives[p]) + } +} + +func (jl *Solver) NewJob(id string) (*Job, error) { + jl.mu.Lock() + defer jl.mu.Unlock() + + if _, ok := jl.jobs[id]; ok { + return nil, errors.Errorf("job ID %s exists", id) + } + + pr, ctx, progressCloser := progress.NewContext(context.Background()) + pw, _, _ := progress.FromContext(ctx) // TODO: expose progress.Pipe() + + 
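+ // pw is the single writer that every vertex state loaded into this job + // attaches to; pr fans the merged progress stream back out to status readers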
j := &Job{ + list: jl, + pr: progress.NewMultiReader(pr), + pw: pw, + progressCloser: progressCloser, + } + jl.jobs[id] = j + + jl.updateCond.Broadcast() + + return j, nil +} + +func (jl *Solver) Get(id string) (*Job, error) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + go func() { + <-ctx.Done() + jl.updateCond.Broadcast() + }() + + jl.mu.RLock() + defer jl.mu.RUnlock() + for { + select { + case <-ctx.Done(): + return nil, errors.Errorf("no such job %s", id) + default: + } + j, ok := jl.jobs[id] + if !ok { + jl.updateCond.Wait() + continue + } + return j, nil + } +} + +// called with solver lock +func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { + if len(st.jobs) == 0 && len(st.parents) == 0 { + for chKey := range st.childVtx { + chState := jl.actives[chKey] + delete(chState.parents, k) + jl.deleteIfUnreferenced(chKey, chState) + } + st.Release() + delete(jl.actives, k) + } +} + +func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, error) { + v, err := j.list.load(e.Vertex, nil, j) + if err != nil { + return nil, err + } + e.Vertex = v + return j.list.s.build(ctx, e) +} + +func (j *Job) Discard() error { + defer j.progressCloser() + + j.list.mu.Lock() + defer j.list.mu.Unlock() + + j.pw.Close() + + for k, st := range j.list.actives { + if _, ok := st.jobs[j]; ok { + delete(st.jobs, j) + j.list.deleteIfUnreferenced(k, st) + } + if _, ok := st.allPw[j.pw]; ok { + delete(st.allPw, j.pw) + } + } + return nil +} + +func (j *Job) Call(ctx context.Context, name string, fn func(ctx context.Context) error) error { + ctx = progress.WithProgress(ctx, j.pw) + return inVertexContext(ctx, name, fn) +} + +type cacheMapResp struct { + *CacheMap + complete bool +} + +type activeOp interface { + CacheMap(context.Context, int) (*cacheMapResp, error) + LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) + Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) + IgnoreCache() bool + Cache() CacheManager + CalcSlowCache(context.Context, Index, ResultBasedCacheFunc, Result) (digest.Digest, error) +} + +func newSharedOp(resolver ResolveOpFunc, cacheManager CacheManager, st *state) *sharedOp { + so := &sharedOp{ + resolver: resolver, + st: st, + slowCacheRes: map[Index]digest.Digest{}, + slowCacheErr: map[Index]error{}, + } + return so +} + +type execRes struct { + execRes []*SharedResult + execExporters []ExportableCacheKey +} + +type sharedOp struct { + resolver ResolveOpFunc + st *state + g flightcontrol.Group + + opOnce sync.Once + op Op + subBuilder *subBuilder + err error + + execRes *execRes + execErr error + + cacheRes []*CacheMap + cacheDone bool + cacheErr error + + slowMu sync.Mutex + slowCacheRes map[Index]digest.Digest + slowCacheErr map[Index]error +} + +func (s *sharedOp) IgnoreCache() bool { + return s.st.vtx.Options().IgnoreCache +} + +func (s *sharedOp) Cache() CacheManager { + return s.st.combinedCacheManager() +} + +func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) { + ctx = progress.WithProgress(ctx, s.st.mpw) + // no cache hit. 
start evaluating the node + span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) + notifyStarted(ctx, &s.st.clientVertex, true) + res, err := s.Cache().Load(ctx, rec) + tracing.FinishWithError(span, err) + notifyCompleted(ctx, &s.st.clientVertex, err, true) + return res, err +} + +func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (digest.Digest, error) { + key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) { + s.slowMu.Lock() + // TODO: add helpers for these stored values + if res := s.slowCacheRes[index]; res != "" { + s.slowMu.Unlock() + return res, nil + } + if err := s.slowCacheErr[index]; err != nil { + s.slowMu.Unlock() + return nil, err + } + s.slowMu.Unlock() + ctx = progress.WithProgress(ctx, s.st.mpw) + key, err := f(ctx, res) + complete := true + if err != nil { + canceled := false + select { + case <-ctx.Done(): + canceled = true + default: + } + if canceled && errors.Cause(err) == context.Canceled { + complete = false + } + } + s.slowMu.Lock() + defer s.slowMu.Unlock() + if complete { + if err == nil { + s.slowCacheRes[index] = key + } + s.slowCacheErr[index] = err + } + return key, err + }) + if err != nil { + return "", err + } + return key.(digest.Digest), nil +} + +func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, error) { + op, err := s.getOp() + if err != nil { + return nil, err + } + res, err := s.g.Do(ctx, "cachemap", func(ctx context.Context) (ret interface{}, retErr error) { + if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) { + return s.cacheRes, nil + } + if s.cacheErr != nil { + return nil, s.cacheErr + } + ctx = progress.WithProgress(ctx, s.st.mpw) + ctx = session.NewContext(ctx, s.st.getSessionID()) + if len(s.st.vtx.Inputs()) == 0 { + // no cache hit. 
start evaluating the node + span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) + notifyStarted(ctx, &s.st.clientVertex, false) + defer func() { + tracing.FinishWithError(span, retErr) + notifyCompleted(ctx, &s.st.clientVertex, retErr, false) + }() + + res, err := op.Exec(ctx, inputs) + complete := true + if err != nil { + canceled := false + select { + case <-ctx.Done(): + canceled = true + default: + } + if canceled && errors.Cause(err) == context.Canceled { + complete = false + } + } + if complete { + if res != nil { + var subExporters []ExportableCacheKey + s.subBuilder.mu.Lock() + if len(s.subBuilder.exporters) > 0 { + subExporters = append(subExporters, s.subBuilder.exporters...) + } + s.subBuilder.mu.Unlock() + + s.execRes = &execRes{execRes: wrapShared(res), execExporters: subExporters} + } + s.execErr = err + } + return s.execRes, err + }) + if err != nil { + return nil, nil, err + } + r := res.(*execRes) + return unwrapShared(r.execRes), r.execExporters, nil +} + +func (s *sharedOp) getOp() (Op, error) { + s.opOnce.Do(func() { + s.subBuilder = s.st.builder() + s.op, s.err = s.resolver(s.st.vtx, s.subBuilder) + }) + if s.err != nil { + return nil, s.err + } + return s.op, nil +} + +func (s *sharedOp) release() { + if s.execRes != nil { + for _, r := range s.execRes.execRes { + go r.Release(context.TODO()) + } + } +} + +func initClientVertex(v Vertex) client.Vertex { + inputDigests := make([]digest.Digest, 0, len(v.Inputs())) + for _, inp := range v.Inputs() { + inputDigests = append(inputDigests, inp.Vertex.Digest()) + } + return client.Vertex{ + Inputs: inputDigests, + Name: v.Name(), + Digest: v.Digest(), + } +} + +func wrapShared(inp []Result) []*SharedResult { + out := make([]*SharedResult, len(inp)) + for i, r := range inp { + out[i] = NewSharedResult(r) + } + return out +} + +func unwrapShared(inp []*SharedResult) []Result { + out := make([]Result, len(inp)) + for i, r := range inp { + out[i] = r.Clone() + } + return out +} + +type vertexWithCacheOptions struct { + Vertex + inputs []Edge + dgst digest.Digest +} + +func (v *vertexWithCacheOptions) Digest() digest.Digest { + return v.dgst +} + +func (v *vertexWithCacheOptions) Inputs() []Edge { + return v.inputs +} + +func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + v.Started = &now + v.Completed = nil + v.Cached = cached + pw.Write(v.Digest.String(), *v) +} + +func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + if v.Started == nil { + v.Started = &now + } + v.Completed = &now + v.Cached = cached + if err != nil { + v.Error = err.Error() + } + pw.Write(v.Digest.String(), *v) +} + +func inVertexContext(ctx context.Context, name string, f func(ctx context.Context) error) error { + v := client.Vertex{ + Digest: digest.FromBytes([]byte(identity.NewID())), + Name: name, + } + pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest)) + notifyStarted(ctx, &v, false) + defer pw.Close() + err := f(ctx) + notifyCompleted(ctx, &v, err, false) + return err +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go new file mode 100644 index 0000000000..98dc2ad5d5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -0,0 +1,172 @@ +package llbsolver + +import ( + "context" + "io" + "strings" + 
"sync" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/tracing" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type llbBridge struct { + builder solver.Builder + frontends map[string]frontend.Frontend + resolveWorker func() (worker.Worker, error) + ci *remotecache.CacheImporter + cms map[string]solver.CacheManager + cmsMu sync.Mutex +} + +func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res solver.CachedResult, exp map[string][]byte, err error) { + var cms []solver.CacheManager + for _, ref := range req.ImportCacheRefs { + b.cmsMu.Lock() + var cm solver.CacheManager + if prevCm, ok := b.cms[ref]; !ok { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, nil, err + } + ref = reference.TagNameOnly(r).String() + func(ref string) { + cm = newLazyCacheManager(ref, func() (solver.CacheManager, error) { + var cmNew solver.CacheManager + if err := b.builder.Call(ctx, "importing cache manifest from "+ref, func(ctx context.Context) error { + cmNew, err = b.ci.Resolve(ctx, ref) + return err + }); err != nil { + return nil, err + } + return cmNew, nil + }) + }(ref) + b.cms[ref] = cm + } else { + cm = prevCm + } + cms = append(cms, cm) + b.cmsMu.Unlock() + } + + if req.Definition != nil && req.Definition.Def != nil { + edge, err := Load(req.Definition, WithCacheSources(cms)) + if err != nil { + return nil, nil, err + } + res, err = b.builder.Build(ctx, edge) + if err != nil { + return nil, nil, err + } + } + if req.Frontend != "" { + f, ok := b.frontends[req.Frontend] + if !ok { + return nil, nil, errors.Errorf("invalid frontend: %s", req.Frontend) + } + res, exp, err = f.Solve(ctx, b, req.FrontendOpt) + if err != nil { + return nil, nil, err + } + } else { + if req.Definition == nil || req.Definition.Def == nil { + return nil, nil, nil + } + } + + if res != nil { + wr, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, nil, errors.Errorf("invalid reference for exporting: %T", res.Sys()) + } + if wr.ImmutableRef != nil { + if err := wr.ImmutableRef.Finalize(ctx); err != nil { + return nil, nil, err + } + } + } + return +} + +func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) { + w, err := s.resolveWorker() + if err != nil { + return err + } + span, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, " ")) + err = w.Exec(ctx, meta, root, stdin, stdout, stderr) + tracing.FinishWithError(span, err) + return err +} + +func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) { + w, err := s.resolveWorker() + if err != nil { + return "", nil, err + } + return w.ResolveImageConfig(ctx, ref) +} + +type lazyCacheManager struct { + id string + main solver.CacheManager + + waitCh chan struct{} + err error +} + +func (lcm *lazyCacheManager) ID() string { + return lcm.id +} +func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Query(inp, inputIndex, dgst, outputIndex) +} +func (lcm *lazyCacheManager) Records(ck 
*solver.CacheKey) ([]*solver.CacheRecord, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Records(ck) +} +func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Load(ctx, rec) +} +func (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result) (*solver.ExportableCacheKey, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Save(key, s) +} + +func (lcm *lazyCacheManager) wait() error { + <-lcm.waitCh + return lcm.err +} + +func newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solver.CacheManager { + lcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})} + go func() { + defer close(lcm.waitCh) + cm, err := fn() + if err != nil { + lcm.err = err + return + } + lcm.main = cm + }() + return lcm +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go new file mode 100644 index 0000000000..9f035e1bf8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go @@ -0,0 +1,128 @@ +package ops + +import ( + "context" + "encoding/json" + "os" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const buildCacheType = "buildkit.build.v0" + +type buildOp struct { + op *pb.BuildOp + b frontend.FrontendLLBBridge + v solver.Vertex +} + +func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { + return &buildOp{ + op: op.Build, + b: b, + v: v, + }, nil +} + +func (b *buildOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + dt, err := json.Marshal(struct { + Type string + Exec *pb.BuildOp + }{ + Type: buildCacheType, + Exec: b.op, + }) + if err != nil { + return nil, false, err + } + + return &solver.CacheMap{ + Digest: digest.FromBytes(dt), + Deps: make([]struct { + Selector digest.Digest + ComputeDigestFunc solver.ResultBasedCacheFunc + }, len(b.v.Inputs())), + }, true, nil +} + +func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []solver.Result, retErr error) { + if b.op.Builder != pb.LLBBuilder { + return nil, errors.Errorf("only LLB builder is currently allowed") + } + + builderInputs := b.op.Inputs + llbDef, ok := builderInputs[pb.LLBDefinitionInput] + if !ok { + return nil, errors.Errorf("no llb definition input %s found", pb.LLBDefinitionInput) + } + + i := int(llbDef.Input) + if i >= len(inputs) { + return nil, errors.Errorf("invalid index %v", i) // TODO: this should be validated before + } + inp := inputs[i] + + ref, ok := inp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for build %T", inp.Sys()) + } + + mount, err := ref.ImmutableRef.Mount(ctx, true) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + root, err := lm.Mount() + if err != nil { + return nil, err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + fn := pb.LLBDefaultDefinitionFile + if override, ok := b.op.Attrs[pb.AttrLLBDefinitionFilename]; ok { + fn = override + } + + newfn, err := 
fs.RootPath(root, fn)
+	if err != nil {
+		return nil, errors.Wrapf(err, "working dir %s points to invalid target", fn)
+	}
+
+	f, err := os.Open(newfn)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to open %s", newfn)
+	}
+
+	def, err := llb.ReadFrom(f)
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	f.Close()
+	lm.Unmount()
+	lm = nil
+
+	newref, _, err := b.b.Solve(ctx, frontend.SolveRequest{
+		Definition: def.ToPB(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return []solver.Result{newref}, err
+}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go
new file mode 100644
index 0000000000..ae5a322c19
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go
@@ -0,0 +1,487 @@
+package ops
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/mount"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/cache/metadata"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/solver/llbsolver"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/progress/logs"
+	"github.com/moby/buildkit/worker"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const execCacheType = "buildkit.exec.v0"
+
+type execOp struct {
+	op        *pb.ExecOp
+	cm        cache.Manager
+	md        *metadata.Store
+	exec      executor.Executor
+	w         worker.Worker
+	numInputs int
+}
+
+func NewExecOp(v solver.Vertex, op *pb.Op_Exec, cm cache.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
+	return &execOp{
+		op:        op.Exec,
+		cm:        cm,
+		md:        md,
+		exec:      exec,
+		numInputs: len(v.Inputs()),
+		w:         w,
+	}, nil
+}
+
+func cloneExecOp(old *pb.ExecOp) pb.ExecOp {
+	n := *old
+	meta := *n.Meta
+	n.Meta = &meta
+	n.Mounts = nil
+	for i := range old.Mounts {
+		m := *old.Mounts[i]
+		n.Mounts = append(n.Mounts, &m)
+	}
+	return n
+}
+
+func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
+	op := cloneExecOp(e.op)
+	for i := range op.Mounts {
+		op.Mounts[i].Selector = ""
+	}
+	op.Meta.ProxyEnv = nil
+
+	dt, err := json.Marshal(struct {
+		Type string
+		Exec *pb.ExecOp
+		OS   string
+		Arch string
+	}{
+		Type: execCacheType,
+		Exec: &op,
+		OS:   runtime.GOOS,
+		Arch: runtime.GOARCH,
+	})
+	if err != nil {
+		return nil, false, err
+	}
+
+	cm := &solver.CacheMap{
+		Digest: digest.FromBytes(dt),
+		Deps: make([]struct {
+			Selector          digest.Digest
+			ComputeDigestFunc solver.ResultBasedCacheFunc
+		}, e.numInputs),
+	}
+
+	deps, err := e.getMountDeps()
+	if err != nil {
+		return nil, false, err
+	}
+
+	for i, dep := range deps {
+		if len(dep.Selectors) != 0 {
+			dgsts := make([][]byte, 0, len(dep.Selectors))
+			for _, p := range dep.Selectors {
+				dgsts = append(dgsts, []byte(p))
+			}
+			cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
+		}
+		if !dep.NoContentBasedHash {
+			cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupePaths(dep.Selectors))
+		}
+	}
+
+	return cm, true, nil
+}
+
+func dedupePaths(inp []string) []string {
+	old := make(map[string]struct{}, len(inp))
+	for _, p := range inp {
+		old[p] = struct{}{}
+	}
+	paths := make([]string, 0, len(old))
+	for p1 := range old {
+		var skip bool
+		for p2 := range old {
+			if p1 != p2 && strings.HasPrefix(p1, p2) {
+				skip = true
+				break
+			}
+		}
+		if !skip {
+			paths = append(paths, p1)
+		}
+	}
+	sort.Slice(paths, func(i, j int) bool {
+		return paths[i] < paths[j]
+	})
+	return paths
+}
+
+type dep struct {
+	Selectors          []string
+	NoContentBasedHash bool
+}
+
+func (e *execOp) getMountDeps() ([]dep, error) {
+	deps := make([]dep, e.numInputs)
+	for _, m := range e.op.Mounts {
+		if m.Input == pb.Empty {
+			continue
+		}
+		if int(m.Input) >= len(deps) {
+			return nil, errors.Errorf("invalid mountinput %v", m)
+		}
+
+		sel := m.Selector
+		if sel != "" {
+			sel = path.Join("/", sel)
+			deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel)
+		}
+
+		if !m.Readonly || m.Dest == pb.RootMount { // exclude read-only rootfs
+			deps[m.Input].NoContentBasedHash = true
+		}
+	}
+	return deps, nil
+}
+
+func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount) (cache.MutableRef, error) {
+
+	key := "cache-dir:" + id
+	if ref != nil {
+		key += ":" + ref.ID()
+	}
+
+	return sharedCacheRefs.get(key, func() (cache.MutableRef, error) {
+		return e.getRefCacheDirNoCache(ctx, key, ref, id, m)
+	})
+}
+
+func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, m *pb.Mount) (cache.MutableRef, error) {
+	makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) {
+		desc := fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
+		return e.cm.New(ctx, ref, cache.WithDescription(desc), cache.CachePolicyRetain)
+	}
+
+	sis, err := e.md.Search(key)
+	if err != nil {
+		return nil, err
+	}
+	for _, si := range sis {
+		if mRef, err := e.cm.GetMutable(ctx, si.ID()); err == nil {
+			logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
+			return mRef, nil
+		}
+	}
+	mRef, err := makeMutable(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	si, _ := e.md.Get(mRef.ID())
+	v, err := metadata.NewValue(key)
+	if err != nil {
+		mRef.Release(context.TODO())
+		return nil, err
+	}
+	v.Index = key
+	if err := si.Update(func(b *bolt.Bucket) error {
+		return si.SetValue(b, key, v)
+	}); err != nil {
+		mRef.Release(context.TODO())
+		return nil, err
+	}
+	return mRef, nil
+}
+
+func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
+	var mounts []executor.Mount
+	var root cache.Mountable
+	var readonlyRootFS bool
+
+	var outputs []cache.Ref
+
+	defer func() {
+		for _, o := range outputs {
+			if o != nil {
+				go o.Release(context.TODO())
+			}
+		}
+	}()
+
+	// loop over all mounts, fill in mounts, root and outputs
+	for _, m := range e.op.Mounts {
+		var mountable cache.Mountable
+		var ref cache.ImmutableRef
+
+		if m.Dest == pb.RootMount && m.MountType != pb.MountType_BIND {
+			return nil, errors.Errorf("invalid mount type %s for %s", m.MountType.String(), m.Dest)
+		}
+
+		// if mount is based on input validate and load it
+		if m.Input != pb.Empty {
+			if int(m.Input) >= len(inputs) {
+				return nil, errors.Errorf("missing input %d", m.Input)
+			}
+			inp := inputs[int(m.Input)]
+			workerRef, ok := inp.Sys().(*worker.WorkerRef)
+			if !ok {
+				return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
+			}
+			ref = workerRef.ImmutableRef
+			mountable = ref
+		}
+
+		makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) {
+			desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
+			return e.cm.New(ctx, ref, cache.WithDescription(desc))
+		}
+
+		switch m.MountType {
+		case pb.MountType_BIND:
+			// if mount creates an output
+			if m.Output != pb.SkipOutput {
+				// if it is readonly and not root then output is the input
+				if m.Readonly && ref != nil && m.Dest != pb.RootMount {
+					outputs = append(outputs, ref.Clone())
+				} else {
+					// otherwise output and mount is the mutable child
+					active, err := makeMutable(ref)
+					if err != nil {
+						return nil, err
+					}
+					outputs = append(outputs, active)
+					mountable = active
+				}
+			} else if ref == nil {
+				// this is an empty readonly scratch mount without an output; not useful for much, but don't error out
+				active, err := makeMutable(ref)
+				if err != nil {
+					return nil, err
+				}
+				defer active.Release(context.TODO())
+				mountable = active
+			}
+
+		case pb.MountType_CACHE:
+			if m.CacheOpt == nil {
+				return nil, errors.Errorf("missing cache mount options")
+			}
+			mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m)
+			if err != nil {
+				return nil, err
+			}
+			mountable = mRef
+			defer func() {
+				go mRef.Release(context.TODO())
+			}()
+			if m.Output != pb.SkipOutput && ref != nil {
+				outputs = append(outputs, ref.Clone())
+			}
+
+		case pb.MountType_TMPFS:
+			mountable = newTmpfs()
+
+		default:
+			return nil, errors.Errorf("mount type %s not implemented", m.MountType)
+		}
+
+		// validate that there is a mount
+		if mountable == nil {
+			return nil, errors.Errorf("mount %s has no input", m.Dest)
+		}
+
+		// if dest is root we need mutable ref even if there is no output
+		if m.Dest == pb.RootMount {
+			root = mountable
+			readonlyRootFS = m.Readonly
+			if m.Output == pb.SkipOutput && readonlyRootFS {
+				active, err := makeMutable(ref)
+				if err != nil {
+					return nil, err
+				}
+				defer func() {
+					go active.Release(context.TODO())
+				}()
+				root = active
+			}
+		} else {
+			mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector})
+		}
+	}
+
+	// sort mounts so parents are mounted first
+	sort.Slice(mounts, func(i, j int) bool {
+		return mounts[i].Dest < mounts[j].Dest
+	})
+
+	meta := executor.Meta{
+		Args:           e.op.Meta.Args,
+		Env:            e.op.Meta.Env,
+		Cwd:            e.op.Meta.Cwd,
+		User:           e.op.Meta.User,
+		ReadonlyRootFS: readonlyRootFS,
+	}
+
+	if e.op.Meta.ProxyEnv != nil {
+		meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...)
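+		// proxyEnvList (defined below) expands ProxyEnv into both upper- and
+		// lower-case variables, e.g. ProxyEnv{HttpProxy: "http://proxy:3128"}
+		// yields HTTP_PROXY=http://proxy:3128 and http_proxy=http://proxy:3128.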
+ } + + stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") + defer stdout.Close() + defer stderr.Close() + + if err := e.exec.Exec(ctx, meta, root, mounts, nil, stdout, stderr); err != nil { + return nil, errors.Wrapf(err, "executor failed running %v", meta.Args) + } + + refs := []solver.Result{} + for i, out := range outputs { + if mutable, ok := out.(cache.MutableRef); ok { + ref, err := mutable.Commit(ctx) + if err != nil { + return nil, errors.Wrapf(err, "error committing %s", mutable.ID()) + } + refs = append(refs, worker.NewWorkerRefResult(ref, e.w)) + } else { + refs = append(refs, worker.NewWorkerRefResult(out.(cache.ImmutableRef), e.w)) + } + outputs[i] = nil + } + return refs, nil +} + +func proxyEnvList(p *pb.ProxyEnv) []string { + out := []string{} + if v := p.HttpProxy; v != "" { + out = append(out, "HTTP_PROXY="+v, "http_proxy="+v) + } + if v := p.HttpsProxy; v != "" { + out = append(out, "HTTPS_PROXY="+v, "https_proxy="+v) + } + if v := p.FtpProxy; v != "" { + out = append(out, "FTP_PROXY="+v, "ftp_proxy="+v) + } + if v := p.NoProxy; v != "" { + out = append(out, "NO_PROXY="+v, "no_proxy="+v) + } + return out +} + +func newTmpfs() cache.Mountable { + return &tmpfs{} +} + +type tmpfs struct { +} + +func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + return &tmpfsMount{readonly: readonly}, nil +} + +type tmpfsMount struct { + readonly bool +} + +func (m *tmpfsMount) Mount() ([]mount.Mount, error) { + opt := []string{"nosuid"} + if m.readonly { + opt = append(opt, "ro") + } + return []mount.Mount{{ + Type: "tmpfs", + Source: "tmpfs", + Options: opt, + }}, nil +} +func (m *tmpfsMount) Release() error { + return nil +} + +var sharedCacheRefs = &cacheRefs{} + +type cacheRefs struct { + mu sync.Mutex + shares map[string]*cacheRefShare +} + +func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.shares == nil { + r.shares = map[string]*cacheRefShare{} + } + + share, ok := r.shares[key] + if ok { + return share.clone(), nil + } + + mref, err := fn() + if err != nil { + return nil, err + } + + share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} + r.shares[key] = share + + return share.clone(), nil +} + +type cacheRefShare struct { + cache.MutableRef + mu sync.Mutex + refs map[*cacheRef]struct{} + main *cacheRefs + key string +} + +func (r *cacheRefShare) clone() cache.MutableRef { + cacheRef := &cacheRef{cacheRefShare: r} + r.mu.Lock() + r.refs[cacheRef] = struct{}{} + r.mu.Unlock() + return cacheRef +} + +func (r *cacheRefShare) release(ctx context.Context) error { + r.main.mu.Lock() + defer r.main.mu.Unlock() + delete(r.main.shares, r.key) + return r.MutableRef.Release(ctx) +} + +type cacheRef struct { + *cacheRefShare +} + +func (r *cacheRef) Release(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + delete(r.refs, r) + if len(r.refs) == 0 { + return r.release(ctx) + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go new file mode 100644 index 0000000000..2133a15463 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go @@ -0,0 +1,76 @@ +package ops + +import ( + "context" + "sync" + + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/worker" 
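+	// note: sourceOp below resolves its source.SourceInstance lazily and at
+	// most once (see instance()), then derives both cache keys and snapshots
+	// from that single instance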
+ digest "github.com/opencontainers/go-digest" +) + +const sourceCacheType = "buildkit.source.v0" + +type sourceOp struct { + mu sync.Mutex + op *pb.Op_Source + sm *source.Manager + src source.SourceInstance + w worker.Worker +} + +func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, sm *source.Manager, w worker.Worker) (solver.Op, error) { + return &sourceOp{ + op: op, + sm: sm, + w: w, + }, nil +} + +func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.src != nil { + return s.src, nil + } + id, err := source.FromLLB(s.op) + if err != nil { + return nil, err + } + src, err := s.sm.Resolve(ctx, id) + if err != nil { + return nil, err + } + s.src = src + return s.src, nil +} + +func (s *sourceOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + src, err := s.instance(ctx) + if err != nil { + return nil, false, err + } + k, done, err := src.CacheKey(ctx, index) + if err != nil { + return nil, false, err + } + + return &solver.CacheMap{ + // TODO: add os/arch + Digest: digest.FromBytes([]byte(sourceCacheType + ":" + k)), + }, done, nil +} + +func (s *sourceOp) Exec(ctx context.Context, _ []solver.Result) (outputs []solver.Result, err error) { + src, err := s.instance(ctx) + if err != nil { + return nil, err + } + ref, err := src.Snapshot(ctx) + if err != nil { + return nil, err + } + return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go new file mode 100644 index 0000000000..7049b86855 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go @@ -0,0 +1,152 @@ +package llbsolver + +import ( + "bytes" + "context" + "path" + "strings" + "time" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc { + return func(ctx context.Context, res solver.Result) (digest.Digest, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return "", errors.Errorf("invalid reference: %T", res) + } + + if len(selectors) == 0 { + selectors = []string{""} + } + + dgsts := make([][]byte, len(selectors)) + + eg, ctx := errgroup.WithContext(ctx) + + for i, sel := range selectors { + // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) + // func(i int) { + // eg.Go(func() error { + dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel)) + if err != nil { + return "", err + } + dgsts[i] = []byte(dgst) + // return nil + // }) + // }(i) + } + + if err := eg.Wait(); err != nil { + return "", err + } + + return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil + } +} + +func newCacheResultStorage(wc *worker.Controller) solver.CacheResultStorage { + return &cacheResultStorage{ + wc: wc, + } +} + +type cacheResultStorage struct { + wc *worker.Controller +} + +func (s *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys()) + } + if ref.ImmutableRef != nil { + if !cache.HasCachePolicyRetain(ref.ImmutableRef) { + if err := 
cache.CachePolicyRetain(ref.ImmutableRef); err != nil { + return solver.CacheResult{}, err + } + ref.ImmutableRef.Metadata().Commit() + } + } + return solver.CacheResult{ID: ref.ID(), CreatedAt: time.Now()}, nil +} +func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) { + return s.load(res.ID) +} + +func (s *cacheResultStorage) getWorkerRef(id string) (worker.Worker, string, error) { + workerID, refID, err := parseWorkerRef(id) + if err != nil { + return nil, "", err + } + w, err := s.wc.Get(workerID) + if err != nil { + return nil, "", err + } + return w, refID, nil +} + +func (s *cacheResultStorage) load(id string) (solver.Result, error) { + w, refID, err := s.getWorkerRef(id) + if err != nil { + return nil, err + } + if refID == "" { + return worker.NewWorkerRefResult(nil, w), nil + } + ref, err := w.LoadRef(refID) + if err != nil { + return nil, err + } + return worker.NewWorkerRefResult(ref, w), nil +} + +func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { + w, refID, err := s.getWorkerRef(res.ID) + if err != nil { + return nil, err + } + ref, err := w.LoadRef(refID) + if err != nil { + return nil, err + } + defer ref.Release(context.TODO()) + remote, err := w.GetRemote(ctx, ref, false) + if err != nil { + return nil, nil // ignore error. loadRemote is best effort + } + return remote, nil +} +func (s *cacheResultStorage) Exists(id string) bool { + ref, err := s.load(id) + if err != nil { + return false + } + ref.Release(context.TODO()) + return true +} + +func parseWorkerRef(id string) (string, string, error) { + parts := strings.Split(id, "::") + if len(parts) != 2 { + return "", "", errors.Errorf("invalid workerref id: %s", id) + } + return parts[0], parts[1], nil +} + +func workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid result: %T", res.Sys()) + } + + return ref.Worker.GetRemote(ctx, ref.ImmutableRef, true) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go new file mode 100644 index 0000000000..460d6e1ba1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -0,0 +1,164 @@ +package llbsolver + +import ( + "context" + "time" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" +) + +type ExporterRequest struct { + Exporter exporter.ExporterInstance + CacheExporter *remotecache.RegistryCacheExporter + CacheExportMode solver.CacheExportMode +} + +// ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases +type ResolveWorkerFunc func() (worker.Worker, error) + +type Solver struct { + solver *solver.Solver + resolveWorker ResolveWorkerFunc + frontends map[string]frontend.Frontend + ci *remotecache.CacheImporter +} + +func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, ci *remotecache.CacheImporter) *Solver { + s := &Solver{ + resolveWorker: defaultResolver(wc), + frontends: f, + ci: ci, + } + + results := newCacheResultStorage(wc) + + cache := 
solver.NewCacheManager("local", cacheStore, results) + + s.solver = solver.NewSolver(solver.SolverOpt{ + ResolveOpFunc: s.resolver(), + DefaultCache: cache, + }) + return s +} + +func (s *Solver) resolver() solver.ResolveOpFunc { + return func(v solver.Vertex, b solver.Builder) (solver.Op, error) { + w, err := s.resolveWorker() + if err != nil { + return nil, err + } + return w.ResolveOp(v, s.Bridge(b)) + } +} + +func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { + return &llbBridge{ + builder: b, + frontends: s.frontends, + resolveWorker: s.resolveWorker, + ci: s.ci, + cms: map[string]solver.CacheManager{}, + } +} + +func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest) (*client.SolveResponse, error) { + j, err := s.solver.NewJob(id) + if err != nil { + return nil, err + } + + defer j.Discard() + + j.SessionID = session.FromContext(ctx) + + res, exporterOpt, err := s.Bridge(j).Solve(ctx, req) + if err != nil { + return nil, err + } + + defer func() { + if res != nil { + go res.Release(context.TODO()) + } + }() + + var exporterResponse map[string]string + if exp := exp.Exporter; exp != nil { + var immutable cache.ImmutableRef + if res != nil { + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", res.Sys()) + } + immutable = workerRef.ImmutableRef + } + + if err := j.Call(ctx, exp.Name(), func(ctx context.Context) error { + exporterResponse, err = exp.Export(ctx, immutable, exporterOpt) + return err + }); err != nil { + return nil, err + } + } + + if e := exp.CacheExporter; e != nil { + if err := j.Call(ctx, "exporting cache", func(ctx context.Context) error { + prepareDone := oneOffProgress(ctx, "preparing build cache for export") + if _, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + Convert: workerRefConverter, + Mode: exp.CacheExportMode, + }); err != nil { + return prepareDone(err) + } + prepareDone(nil) + + return e.Finalize(ctx) + }); err != nil { + return nil, err + } + } + + return &client.SolveResponse{ + ExporterResponse: exporterResponse, + }, nil +} + +func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { + j, err := s.solver.Get(id) + if err != nil { + return err + } + return j.Status(ctx, statusChan) +} + +func defaultResolver(wc *worker.Controller) ResolveWorkerFunc { + return func() (worker.Worker, error) { + return wc.GetDefault() + } +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go new file mode 100644 index 0000000000..f7f17c184f --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go @@ -0,0 +1,147 @@ +package llbsolver + +import ( + "strings" + + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type vertex struct { + sys interface{} + options solver.VertexOptions + inputs []solver.Edge + digest digest.Digest + name string +} + +func (v *vertex) Digest() digest.Digest { + 
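+	// the digest is the content hash of the marshaled pb.Op this vertex was
+	// loaded from (see loadLLB), so identical ops share a vertex identity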
return v.digest +} + +func (v *vertex) Sys() interface{} { + return v.sys +} + +func (v *vertex) Options() solver.VertexOptions { + return v.options +} + +func (v *vertex) Inputs() []solver.Edge { + return v.inputs +} + +func (v *vertex) Name() string { + return v.name +} + +type LoadOpt func(*solver.VertexOptions) + +func WithCacheSources(cms []solver.CacheManager) LoadOpt { + return func(opt *solver.VertexOptions) { + opt.CacheSources = cms + } +} + +func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) { + return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { + opMetadata := def.Metadata[dgst] + vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...) + if err != nil { + return nil, err + } + return vtx, nil + }) +} + +func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) { + opt := solver.VertexOptions{} + if opMeta != nil { + opt.IgnoreCache = opMeta.IgnoreCache + opt.Description = opMeta.Description + if opMeta.ExportCache != nil { + opt.ExportCache = &opMeta.ExportCache.Value + } + } + for _, fn := range opts { + fn(&opt) + } + vtx := &vertex{sys: op.Op, options: opt, digest: dgst, name: llbOpName(op)} + for _, in := range op.Inputs { + sub, err := load(in.Digest) + if err != nil { + return nil, err + } + vtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub}) + } + return vtx, nil +} + +// loadLLB loads LLB. +// fn is executed sequentially. +func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { + if len(def.Def) == 0 { + return solver.Edge{}, errors.New("invalid empty definition") + } + + allOps := make(map[digest.Digest]*pb.Op) + + var dgst digest.Digest + + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op") + } + dgst = digest.FromBytes(dt) + allOps[dgst] = &op + } + + lastOp := allOps[dgst] + delete(allOps, dgst) + dgst = lastOp.Inputs[0].Digest + + cache := make(map[digest.Digest]solver.Vertex) + + var rec func(dgst digest.Digest) (solver.Vertex, error) + rec = func(dgst digest.Digest) (solver.Vertex, error) { + if v, ok := cache[dgst]; ok { + return v, nil + } + v, err := fn(dgst, allOps[dgst], rec) + if err != nil { + return nil, err + } + cache[dgst] = v + return v, nil + } + + v, err := rec(dgst) + if err != nil { + return solver.Edge{}, err + } + return solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil +} + +func llbOpName(op *pb.Op) string { + switch op := op.Op.(type) { + case *pb.Op_Source: + if id, err := source.FromLLB(op); err == nil { + if id, ok := id.(*source.LocalIdentifier); ok { + if len(id.IncludePatterns) == 1 { + return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")" + } + } + } + return op.Source.Identifier + case *pb.Op_Exec: + return strings.Join(op.Exec.Meta.Args, " ") + case *pb.Op_Build: + return "build" + default: + return "unknown" + } +} diff --git a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go new file mode 100644 index 0000000000..75c8abdead --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -0,0 +1,307 @@ +package solver + +import ( + "context" + "sync" + "time" + + 
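+	// note: the stores below keep all cache keys, links and results in
+	// process memory only; nothing is persisted across restarts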
"github.com/pkg/errors" +) + +func NewInMemoryCacheStorage() CacheKeyStorage { + return &inMemoryStore{ + byID: map[string]*inMemoryKey{}, + byResult: map[string]map[string]struct{}{}, + } +} + +type inMemoryStore struct { + mu sync.RWMutex + byID map[string]*inMemoryKey + byResult map[string]map[string]struct{} +} + +type inMemoryKey struct { + id string + results map[string]CacheResult + links map[CacheInfoLink]map[string]struct{} + backlinks map[string]struct{} +} + +func (s *inMemoryStore) Exists(id string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if k, ok := s.byID[id]; ok { + return len(k.links) > 0 || len(k.results) > 0 + } + return false +} + +func newInMemoryKey(id string) *inMemoryKey { + return &inMemoryKey{ + results: map[string]CacheResult{}, + links: map[CacheInfoLink]map[string]struct{}{}, + backlinks: map[string]struct{}{}, + id: id, + } +} + +func (s *inMemoryStore) Walk(fn func(string) error) error { + s.mu.RLock() + ids := make([]string, 0, len(s.byID)) + for id := range s.byID { + ids = append(ids, id) + } + s.mu.RUnlock() + + for _, id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) WalkResults(id string, fn func(CacheResult) error) error { + s.mu.RLock() + + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + copy := make([]CacheResult, 0, len(k.results)) + for _, res := range k.results { + copy = append(copy, res) + } + s.mu.RUnlock() + + for _, res := range copy { + if err := fn(res); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) Load(id string, resultID string) (CacheResult, error) { + s.mu.RLock() + defer s.mu.RUnlock() + k, ok := s.byID[id] + if !ok { + return CacheResult{}, errors.Wrapf(ErrNotFound, "no such key %s", id) + } + r, ok := k.results[resultID] + if !ok { + return CacheResult{}, errors.WithStack(ErrNotFound) + } + return r, nil +} + +func (s *inMemoryStore) AddResult(id string, res CacheResult) error { + s.mu.Lock() + defer s.mu.Unlock() + k, ok := s.byID[id] + if !ok { + k = newInMemoryKey(id) + s.byID[id] = k + } + k.results[res.ID] = res + m, ok := s.byResult[res.ID] + if !ok { + m = map[string]struct{}{} + s.byResult[res.ID] = m + } + m[id] = struct{}{} + return nil +} + +func (s *inMemoryStore) WalkIDsByResult(resultID string, fn func(string) error) error { + s.mu.Lock() + + ids := map[string]struct{}{} + for id := range s.byResult[resultID] { + ids[id] = struct{}{} + } + s.mu.Unlock() + + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + + return nil +} + +func (s *inMemoryStore) Release(resultID string) error { + s.mu.Lock() + defer s.mu.Unlock() + + ids, ok := s.byResult[resultID] + if !ok { + return nil + } + + for id := range ids { + k, ok := s.byID[id] + if !ok { + continue + } + + delete(k.results, resultID) + delete(s.byResult[resultID], id) + if len(s.byResult[resultID]) == 0 { + delete(s.byResult, resultID) + } + + s.emptyBranchWithParents(k) + } + + return nil +} + +func (s *inMemoryStore) emptyBranchWithParents(k *inMemoryKey) { + if len(k.results) != 0 || len(k.links) != 0 { + return + } + for id := range k.backlinks { + p, ok := s.byID[id] + if !ok { + continue + } + for l := range p.links { + delete(p.links[l], k.id) + if len(p.links[l]) == 0 { + delete(p.links, l) + } + } + s.emptyBranchWithParents(p) + } + + delete(s.byID, k.id) +} + +func (s *inMemoryStore) AddLink(id string, link CacheInfoLink, target string) error { + s.mu.Lock() + defer s.mu.Unlock() + k, ok := s.byID[id] + if !ok { + 
k = newInMemoryKey(id) + s.byID[id] = k + } + k2, ok := s.byID[target] + if !ok { + k2 = newInMemoryKey(target) + s.byID[target] = k2 + } + m, ok := k.links[link] + if !ok { + m = map[string]struct{}{} + k.links[link] = m + } + + k2.backlinks[id] = struct{}{} + m[target] = struct{}{} + return nil +} + +func (s *inMemoryStore) WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error { + s.mu.RLock() + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + var links []string + for target := range k.links[link] { + links = append(links, target) + } + s.mu.RUnlock() + + for _, t := range links { + if err := fn(t); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) HasLink(id string, link CacheInfoLink, target string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if k, ok := s.byID[id]; ok { + if v, ok := k.links[link]; ok { + if _, ok := v[target]; ok { + return true + } + } + } + return false +} + +func (s *inMemoryStore) WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error { + s.mu.RLock() + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + var outIDs []string + var outLinks []CacheInfoLink + for bid := range k.backlinks { + b, ok := s.byID[bid] + if !ok { + continue + } + for l, m := range b.links { + if _, ok := m[id]; !ok { + continue + } + outIDs = append(outIDs, bid) + outLinks = append(outLinks, CacheInfoLink{ + Digest: rootKey(l.Digest, l.Output), + Input: l.Input, + Selector: l.Selector, + }) + } + } + s.mu.RUnlock() + + for i := range outIDs { + if err := fn(outIDs[i], outLinks[i]); err != nil { + return err + } + } + return nil +} + +func NewInMemoryResultStorage() CacheResultStorage { + return &inMemoryResultStore{m: &sync.Map{}} +} + +type inMemoryResultStore struct { + m *sync.Map +} + +func (s *inMemoryResultStore) Save(r Result) (CacheResult, error) { + s.m.Store(r.ID(), r) + return CacheResult{ID: r.ID(), CreatedAt: time.Now()}, nil +} + +func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result, error) { + v, ok := s.m.Load(res.ID) + if !ok { + return nil, errors.WithStack(ErrNotFound) + } + return v.(Result), nil +} + +func (s *inMemoryResultStore) LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) { + return nil, nil +} + +func (s *inMemoryResultStore) Exists(id string) bool { + _, ok := s.m.Load(id) + return ok +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go new file mode 100644 index 0000000000..cee4f1b326 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -0,0 +1,16 @@ +package pb + +const AttrKeepGitDir = "git.keepgitdir" +const AttrFullRemoteURL = "git.fullurl" +const AttrLocalSessionID = "local.session" +const AttrIncludePatterns = "local.includepattern" +const AttrFollowPaths = "local.followpaths" +const AttrExcludePatterns = "local.excludepatterns" +const AttrSharedKeyHint = "local.sharedkeyhint" +const AttrLLBDefinitionFilename = "llbbuild.filename" + +const AttrHTTPChecksum = "http.checksum" +const AttrHTTPFilename = "http.filename" +const AttrHTTPPerm = "http.perm" +const AttrHTTPUID = "http.uid" +const AttrHTTPGID = "http.gid" diff --git a/vendor/github.com/moby/buildkit/solver/pb/const.go b/vendor/github.com/moby/buildkit/solver/pb/const.go new file mode 100644 index 0000000000..2cb9951082 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/const.go @@ -0,0 +1,12 @@ +package pb + +type InputIndex int64 +type OutputIndex int64 + 
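+// A minimal sketch of how the sentinel values below are consumed (assuming a
+// *pb.Mount m, as in llbsolver/ops/exec.go; the variable names are
+// illustrative):
+//
+//	if m.Input != pb.Empty { // mount is backed by an input edge
+//		inp = inputs[int(m.Input)]
+//	}
+//	if m.Output != pb.SkipOutput { // mount contributes an output ref
+//		outputs = append(outputs, out)
+//	}
+//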
+const RootMount = "/" +const SkipOutput OutputIndex = -1 +const Empty InputIndex = -1 +const LLBBuilder InputIndex = -1 + +const LLBDefinitionInput = "buildkit.llb.definition" +const LLBDefaultDefinitionFile = LLBDefinitionInput diff --git a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go new file mode 100644 index 0000000000..c31e148f2a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/generate.go @@ -0,0 +1,3 @@ +package pb + +//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go new file mode 100644 index 0000000000..d6ac7ad9d6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -0,0 +1,4494 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ops.proto + +/* + Package pb is a generated protocol buffer package. + + Package pb provides the protobuf definition of LLB: low-level builder instruction. + LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. + + It is generated from these files: + ops.proto + + It has these top-level messages: + Op + Input + ExecOp + Meta + Mount + CacheOpt + CopyOp + CopySource + SourceOp + BuildOp + BuildInput + OpMetadata + ExportCache + ProxyEnv + WorkerConstraint + Definition +*/ +package pb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type MountType int32 + +const ( + MountType_BIND MountType = 0 + MountType_SECRET MountType = 1 + MountType_SSH MountType = 2 + MountType_CACHE MountType = 3 + MountType_TMPFS MountType = 4 +) + +var MountType_name = map[int32]string{ + 0: "BIND", + 1: "SECRET", + 2: "SSH", + 3: "CACHE", + 4: "TMPFS", +} +var MountType_value = map[string]int32{ + "BIND": 0, + "SECRET": 1, + "SSH": 2, + "CACHE": 3, + "TMPFS": 4, +} + +func (x MountType) String() string { + return proto.EnumName(MountType_name, int32(x)) +} +func (MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } + +// Op represents a vertex of the LLB DAG. +type Op struct { + // inputs is a set of input edges. 
+ Inputs []*Input `protobuf:"bytes,1,rep,name=inputs" json:"inputs,omitempty"` + // Types that are valid to be assigned to Op: + // *Op_Exec + // *Op_Source + // *Op_Copy + // *Op_Build + Op isOp_Op `protobuf_oneof:"op"` +} + +func (m *Op) Reset() { *m = Op{} } +func (m *Op) String() string { return proto.CompactTextString(m) } +func (*Op) ProtoMessage() {} +func (*Op) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } + +type isOp_Op interface { + isOp_Op() + MarshalTo([]byte) (int, error) + Size() int +} + +type Op_Exec struct { + Exec *ExecOp `protobuf:"bytes,2,opt,name=exec,oneof"` +} +type Op_Source struct { + Source *SourceOp `protobuf:"bytes,3,opt,name=source,oneof"` +} +type Op_Copy struct { + Copy *CopyOp `protobuf:"bytes,4,opt,name=copy,oneof"` +} +type Op_Build struct { + Build *BuildOp `protobuf:"bytes,5,opt,name=build,oneof"` +} + +func (*Op_Exec) isOp_Op() {} +func (*Op_Source) isOp_Op() {} +func (*Op_Copy) isOp_Op() {} +func (*Op_Build) isOp_Op() {} + +func (m *Op) GetOp() isOp_Op { + if m != nil { + return m.Op + } + return nil +} + +func (m *Op) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *Op) GetExec() *ExecOp { + if x, ok := m.GetOp().(*Op_Exec); ok { + return x.Exec + } + return nil +} + +func (m *Op) GetSource() *SourceOp { + if x, ok := m.GetOp().(*Op_Source); ok { + return x.Source + } + return nil +} + +func (m *Op) GetCopy() *CopyOp { + if x, ok := m.GetOp().(*Op_Copy); ok { + return x.Copy + } + return nil +} + +func (m *Op) GetBuild() *BuildOp { + if x, ok := m.GetOp().(*Op_Build); ok { + return x.Build + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Op) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Op_OneofMarshaler, _Op_OneofUnmarshaler, _Op_OneofSizer, []interface{}{ + (*Op_Exec)(nil), + (*Op_Source)(nil), + (*Op_Copy)(nil), + (*Op_Build)(nil), + } +} + +func _Op_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Op) + // op + switch x := m.Op.(type) { + case *Op_Exec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Exec); err != nil { + return err + } + case *Op_Source: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Source); err != nil { + return err + } + case *Op_Copy: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Copy); err != nil { + return err + } + case *Op_Build: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Build); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Op.Op has unexpected type %T", x) + } + return nil +} + +func _Op_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Op) + switch tag { + case 2: // op.exec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExecOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Exec{msg} + return true, err + case 3: // op.source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SourceOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Source{msg} + return true, err + case 4: // op.copy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CopyOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Copy{msg} + return true, err + case 5: // op.build + if 
wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Build{msg} + return true, err + default: + return false, nil + } +} + +func _Op_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Op) + // op + switch x := m.Op.(type) { + case *Op_Exec: + s := proto.Size(x.Exec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Source: + s := proto.Size(x.Source) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Copy: + s := proto.Size(x.Copy) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Build: + s := proto.Size(x.Build) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Input represents an input edge for an Op. +type Input struct { + // digest of the marshaled input Op + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // output index of the input Op + Index OutputIndex `protobuf:"varint,2,opt,name=index,proto3,customtype=OutputIndex" json:"index"` +} + +func (m *Input) Reset() { *m = Input{} } +func (m *Input) String() string { return proto.CompactTextString(m) } +func (*Input) ProtoMessage() {} +func (*Input) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{1} } + +// ExecOp executes a command in a container. +type ExecOp struct { + Meta *Meta `protobuf:"bytes,1,opt,name=meta" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"` +} + +func (m *ExecOp) Reset() { *m = ExecOp{} } +func (m *ExecOp) String() string { return proto.CompactTextString(m) } +func (*ExecOp) ProtoMessage() {} +func (*ExecOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{2} } + +func (m *ExecOp) GetMeta() *Meta { + if m != nil { + return m.Meta + } + return nil +} + +func (m *ExecOp) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +// Meta is a set of arguments for ExecOp. +// Meta is unrelated to LLB metadata. +// FIXME: rename (ExecContext? ExecArgs?) +type Meta struct { + Args []string `protobuf:"bytes,1,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv" json:"proxy_env,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (m *Meta) String() string { return proto.CompactTextString(m) } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{3} } + +func (m *Meta) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *Meta) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Meta) GetCwd() string { + if m != nil { + return m.Cwd + } + return "" +} + +func (m *Meta) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *Meta) GetProxyEnv() *ProxyEnv { + if m != nil { + return m.ProxyEnv + } + return nil +} + +// Mount specifies how to mount an input Op as a filesystem. 
+type Mount struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` + Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` + Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` + MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` + CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt" json:"cacheOpt,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{4} } + +func (m *Mount) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *Mount) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetMountType() MountType { + if m != nil { + return m.MountType + } + return MountType_BIND +} + +func (m *Mount) GetCacheOpt() *CacheOpt { + if m != nil { + return m.CacheOpt + } + return nil +} + +type CacheOpt struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *CacheOpt) Reset() { *m = CacheOpt{} } +func (m *CacheOpt) String() string { return proto.CompactTextString(m) } +func (*CacheOpt) ProtoMessage() {} +func (*CacheOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{5} } + +func (m *CacheOpt) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +// CopyOp copies files across Ops. +type CopyOp struct { + Src []*CopySource `protobuf:"bytes,1,rep,name=src" json:"src,omitempty"` + Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` +} + +func (m *CopyOp) Reset() { *m = CopyOp{} } +func (m *CopyOp) String() string { return proto.CompactTextString(m) } +func (*CopyOp) ProtoMessage() {} +func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{6} } + +func (m *CopyOp) GetSrc() []*CopySource { + if m != nil { + return m.Src + } + return nil +} + +func (m *CopyOp) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +// CopySource specifies a source for CopyOp. +type CopySource struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` +} + +func (m *CopySource) Reset() { *m = CopySource{} } +func (m *CopySource) String() string { return proto.CompactTextString(m) } +func (*CopySource) ProtoMessage() {} +func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{7} } + +func (m *CopySource) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +// SourceOp specifies a source such as build contexts and images. +type SourceOp struct { + // TODO: use source type or any type instead of URL protocol. + // identifier e.g. local://, docker-image://, git://, https://... 
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // attrs are defined in attr.go + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SourceOp) Reset() { *m = SourceOp{} } +func (m *SourceOp) String() string { return proto.CompactTextString(m) } +func (*SourceOp) ProtoMessage() {} +func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} } + +func (m *SourceOp) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *SourceOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildOp is used for nested build invocation. +type BuildOp struct { + Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"` + Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Def *Definition `protobuf:"bytes,3,opt,name=def" json:"def,omitempty"` + Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *BuildOp) Reset() { *m = BuildOp{} } +func (m *BuildOp) String() string { return proto.CompactTextString(m) } +func (*BuildOp) ProtoMessage() {} +func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} } + +func (m *BuildOp) GetInputs() map[string]*BuildInput { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *BuildOp) GetDef() *Definition { + if m != nil { + return m.Def + } + return nil +} + +func (m *BuildOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildInput is used for BuildOp. +type BuildInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *BuildInput) Reset() { *m = BuildInput{} } +func (m *BuildInput) String() string { return proto.CompactTextString(m) } +func (*BuildInput) ProtoMessage() {} +func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} } + +// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. +type OpMetadata struct { + // ignore_cache specifies to ignore the cache for this Op. 
+ IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` + // Description can be used for keeping any text fields that builder doesn't parse + Description map[string]string `protobuf:"bytes,2,rep,name=description" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkerConstraint *WorkerConstraint `protobuf:"bytes,3,opt,name=worker_constraint,json=workerConstraint" json:"worker_constraint,omitempty"` + ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache" json:"export_cache,omitempty"` +} + +func (m *OpMetadata) Reset() { *m = OpMetadata{} } +func (m *OpMetadata) String() string { return proto.CompactTextString(m) } +func (*OpMetadata) ProtoMessage() {} +func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} } + +func (m *OpMetadata) GetIgnoreCache() bool { + if m != nil { + return m.IgnoreCache + } + return false +} + +func (m *OpMetadata) GetDescription() map[string]string { + if m != nil { + return m.Description + } + return nil +} + +func (m *OpMetadata) GetWorkerConstraint() *WorkerConstraint { + if m != nil { + return m.WorkerConstraint + } + return nil +} + +func (m *OpMetadata) GetExportCache() *ExportCache { + if m != nil { + return m.ExportCache + } + return nil +} + +type ExportCache struct { + Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` +} + +func (m *ExportCache) Reset() { *m = ExportCache{} } +func (m *ExportCache) String() string { return proto.CompactTextString(m) } +func (*ExportCache) ProtoMessage() {} +func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} } + +func (m *ExportCache) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +type ProxyEnv struct { + HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` + HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` + FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` + NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` +} + +func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } +func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } +func (*ProxyEnv) ProtoMessage() {} +func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} } + +func (m *ProxyEnv) GetHttpProxy() string { + if m != nil { + return m.HttpProxy + } + return "" +} + +func (m *ProxyEnv) GetHttpsProxy() string { + if m != nil { + return m.HttpsProxy + } + return "" +} + +func (m *ProxyEnv) GetFtpProxy() string { + if m != nil { + return m.FtpProxy + } + return "" +} + +func (m *ProxyEnv) GetNoProxy() string { + if m != nil { + return m.NoProxy + } + return "" +} + +// WorkerConstraint is experimental and likely to be changed. 
+type WorkerConstraint struct { + Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` +} + +func (m *WorkerConstraint) Reset() { *m = WorkerConstraint{} } +func (m *WorkerConstraint) String() string { return proto.CompactTextString(m) } +func (*WorkerConstraint) ProtoMessage() {} +func (*WorkerConstraint) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} } + +func (m *WorkerConstraint) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +// Definition is the LLB definition structure with per-vertex metadata entries +type Definition struct { + // def is a list of marshaled Op messages + Def [][]byte `protobuf:"bytes,1,rep,name=def" json:"def,omitempty"` + // metadata contains metadata for the each of the Op messages. + // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. + Metadata map[github_com_opencontainers_go_digest.Digest]OpMetadata `protobuf:"bytes,2,rep,name=metadata,castkey=github.com/opencontainers/go-digest.Digest" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Definition) Reset() { *m = Definition{} } +func (m *Definition) String() string { return proto.CompactTextString(m) } +func (*Definition) ProtoMessage() {} +func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} } + +func (m *Definition) GetDef() [][]byte { + if m != nil { + return m.Def + } + return nil +} + +func (m *Definition) GetMetadata() map[github_com_opencontainers_go_digest.Digest]OpMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func init() { + proto.RegisterType((*Op)(nil), "pb.Op") + proto.RegisterType((*Input)(nil), "pb.Input") + proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") + proto.RegisterType((*Meta)(nil), "pb.Meta") + proto.RegisterType((*Mount)(nil), "pb.Mount") + proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") + proto.RegisterType((*CopyOp)(nil), "pb.CopyOp") + proto.RegisterType((*CopySource)(nil), "pb.CopySource") + proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") + proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") + proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") + proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") + proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") + proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") + proto.RegisterType((*WorkerConstraint)(nil), "pb.WorkerConstraint") + proto.RegisterType((*Definition)(nil), "pb.Definition") + proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) +} +func (m *Op) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Op) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Inputs) > 0 { + for _, msg := range m.Inputs { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Op != nil { + nn1, err := m.Op.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Exec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Exec.Size())) + n2, err := m.Exec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} 
+func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Source != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Source.Size())) + n3, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Op_Copy) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Copy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Copy.Size())) + n4, err := m.Copy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Build != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Build.Size())) + n5, err := m.Build.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Input) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Input) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Index != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Index)) + } + return i, nil +} + +func (m *ExecOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Meta != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Meta.Size())) + n6, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if len(m.Mounts) > 0 { + for _, msg := range m.Mounts { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Cwd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) + i += copy(dAtA[i:], m.Cwd) + } + if len(m.User) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if m.ProxyEnv != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ProxyEnv.Size())) + n7, err := m.ProxyEnv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if len(m.Selector) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + if len(m.Dest) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + if m.Output != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Output)) + } + if m.Readonly { + dAtA[i] = 0x28 + i++ + if m.Readonly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MountType != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.MountType)) + } + if m.CacheOpt != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.CacheOpt.Size())) + n8, err := m.CacheOpt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CacheOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *CopyOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CopyOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Src) > 0 { + for _, msg := range m.Src { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Dest) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + return i, nil +} + +func (m *CopySource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CopySource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if len(m.Selector) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + return i, nil +} + +func (m *SourceOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Identifier) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) + i += copy(dAtA[i:], m.Identifier) + } + if len(m.Attrs) > 0 { + for k, _ := range m.Attrs { + dAtA[i] = 0x12 + i++ + v := m.Attrs[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) 
+ } + } + return i, nil +} + +func (m *BuildOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Builder != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, _ := range m.Inputs { + dAtA[i] = 0x12 + i++ + v := m.Inputs[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(v.Size())) + n9, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + } + } + if m.Def != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Def.Size())) + n10, err := m.Def.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if len(m.Attrs) > 0 { + for k, _ := range m.Attrs { + dAtA[i] = 0x22 + i++ + v := m.Attrs[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *BuildInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + return i, nil +} + +func (m *OpMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.IgnoreCache { + dAtA[i] = 0x8 + i++ + if m.IgnoreCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Description) > 0 { + for k, _ := range m.Description { + dAtA[i] = 0x12 + i++ + v := m.Description[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.WorkerConstraint != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.WorkerConstraint.Size())) + n11, err := m.WorkerConstraint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.ExportCache != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size())) + n12, err := m.ExportCache.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ExportCache) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.HttpProxy) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) + i += copy(dAtA[i:], m.HttpProxy) + } + if len(m.HttpsProxy) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) + i += copy(dAtA[i:], m.HttpsProxy) + } + if len(m.FtpProxy) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) + i += copy(dAtA[i:], m.FtpProxy) + } + if len(m.NoProxy) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) + i += copy(dAtA[i:], m.NoProxy) + } + return i, nil +} + +func (m *WorkerConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerConstraint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Definition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Definition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + if len(m.Metadata) > 0 { + for k, _ := range m.Metadata { + dAtA[i] = 0x12 + i++ + v := m.Metadata[k] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64((&v).Size())) + n13, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + } + return i, nil +} + +func encodeVarintOps(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Op) Size() (n int) { + var l int + _ = l + if len(m.Inputs) > 0 { + for _, e := range m.Inputs { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + if m.Op != nil { + n += m.Op.Size() + } + return n +} + +func (m *Op_Exec) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Source) Size() (n int) { + var l int + _ = l + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Copy) Size() (n int) { + var l 
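encodeVarintOps, defined in this hunk, is plain little-endian base-128 (LEB128): seven payload bits per byte, high bit set on every byte except the last. It produces the same bytes as the standard library's uvarint encoding, which gives an easy cross-check — a sketch, not part of the diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarint mirrors the generated encodeVarintOps: emit seven bits at a
// time, setting the continuation bit on all but the final byte.
func encodeVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02, same as binary.PutUvarint
	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}
```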
int + _ = l + if m.Copy != nil { + l = m.Copy.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Build) Size() (n int) { + var l int + _ = l + if m.Build != nil { + l = m.Build.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Input) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovOps(uint64(m.Index)) + } + return n +} + +func (m *ExecOp) Size() (n int) { + var l int + _ = l + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Cwd) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.ProxyEnv != nil { + l = m.ProxyEnv.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Readonly { + n += 2 + } + if m.MountType != 0 { + n += 1 + sovOps(uint64(m.MountType)) + } + if m.CacheOpt != nil { + l = m.CacheOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CacheOpt) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CopyOp) Size() (n int) { + var l int + _ = l + if len(m.Src) > 0 { + for _, e := range m.Src { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CopySource) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *SourceOp) Size() (n int) { + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildOp) Size() (n int) { + var l int + _ = l + if m.Builder != 0 { + n += 1 + sovOps(uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, v := range m.Inputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovOps(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildInput) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *OpMetadata) Size() 
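These Size methods exist so Marshal can allocate exactly once: Size walks the message computing the encoded length (sovOps for varints, len plus a varint prefix for strings and submessages), and MarshalTo then fills the buffer without ever growing it. A toy message showing the same two-pass shape (the type and field are invented for illustration):

```go
package main

import "fmt"

// Ping demonstrates the generated two-pass pattern: Size computes the exact
// encoded length, Marshal allocates once and encodes into it.
type Ping struct{ Seq uint64 }

// uvarintLen counts varint bytes, like the generated sovOps.
func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func (p *Ping) Size() int { return 1 + uvarintLen(p.Seq) } // key 0x08 + value

func (p *Ping) Marshal() []byte {
	b := make([]byte, 0, p.Size())
	b = append(b, 0x08) // field 1, wire type 0 (varint)
	for v := p.Seq; ; {
		if v < 0x80 {
			b = append(b, byte(v))
			break
		}
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return b
}

func main() {
	b := (&Ping{Seq: 300}).Marshal()
	fmt.Printf("%d bytes: % x\n", len(b), b) // 3 bytes: 08 ac 02
}
```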
(n int) { + var l int + _ = l + if m.IgnoreCache { + n += 2 + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.WorkerConstraint != nil { + l = m.WorkerConstraint.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.ExportCache != nil { + l = m.ExportCache.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *ExportCache) Size() (n int) { + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *ProxyEnv) Size() (n int) { + var l int + _ = l + l = len(m.HttpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.HttpsProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.FtpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.NoProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *WorkerConstraint) Size() (n int) { + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Definition) Size() (n int) { + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + l = len(b) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func sovOps(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozOps(x uint64) (n int) { + return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Op) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Op: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, &Input{}) + if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex 
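sovOps, defined in this hunk, counts how many bytes a value occupies as a varint; sozOps first applies zigzag encoding, (x << 1) ^ (x >> 63), so that small negative numbers map to small unsigned ones and stay short on the wire. A sketch mirroring both (same logic, our names):

```go
package main

import "fmt"

// sov counts varint bytes, as the generated sovOps does.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// soz zigzag-encodes first: 0,-1,1,-2,... map to 0,1,2,3,...
func soz(x int64) int {
	return sov(uint64(x<<1) ^ uint64(x>>63))
}

func main() {
	fmt.Println(sov(127), sov(128)) // 1 2
	fmt.Println(soz(-1), soz(64))   // 1 2
}
```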
> l { + return io.ErrUnexpectedEOF + } + v := &ExecOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Exec{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SourceOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Source{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CopyOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Copy{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BuildOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Build{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Input) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Input: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Input: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + 
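Cases 2 through 5 of Op.Unmarshal above implement the Op oneof: whichever variant arrives is decoded into its own submessage and stored behind a typed wrapper (Op_Exec, Op_Source, Op_Copy, Op_Build) in the single interface-valued field m.Op, so at most one variant is set at a time. A stripped-down model of that pattern (types simplified from the generated ones):

```go
package main

import "fmt"

// isOp mimics the generated oneof interface: exactly one wrapper type is
// stored in Op.Op, selected by the field number seen on the wire.
type isOp interface{ isOp() }

type ExecOp struct{ Args []string }
type SourceOp struct{ Identifier string }

type OpExec struct{ Exec *ExecOp }
type OpSource struct{ Source *SourceOp }

func (*OpExec) isOp()   {}
func (*OpSource) isOp() {}

type Op struct{ Op isOp }

func main() {
	op := Op{Op: &OpSource{Source: &SourceOp{Identifier: "docker-image://alpine"}}}
	switch v := op.Op.(type) { // consumers type-switch, as the solver does
	case *OpExec:
		fmt.Println("exec", v.Exec.Args)
	case *OpSource:
		fmt.Println("source", v.Source.Identifier)
	}
}
```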
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = &Meta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cwd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProxyEnv == nil { + m.ProxyEnv = &ProxyEnv{} + } + if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + m.Output = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Output |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Readonly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) + } + m.MountType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MountType |= (MountType(b) & 0x7F) << shift + if b < 0x80 { + 
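Every Unmarshal loop in this file ends in the same default arm: unknown field numbers are handed to skipOps, which consumes the field according to its wire type so that payloads from newer producers remain decodable. A minimal version of that skipping logic, assuming only wire types 0 and 2 (the generated skipOps also handles fixed32, fixed64, and groups):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// skipField consumes one unknown field, mirroring the role skipOps plays in
// the generated Unmarshal loops. Sketch: wire types 0 and 2 only.
func skipField(b []byte) (int, error) {
	key, n := binary.Uvarint(b)
	if n <= 0 {
		return 0, errors.New("bad key")
	}
	switch key & 0x7 { // low three bits carry the wire type
	case 0: // varint: skip one more varint
		_, m := binary.Uvarint(b[n:])
		if m <= 0 {
			return 0, errors.New("bad varint")
		}
		return n + m, nil
	case 2: // length-delimited: skip length prefix plus payload
		l, m := binary.Uvarint(b[n:])
		if m <= 0 || n+m+int(l) > len(b) {
			return 0, errors.New("truncated field")
		}
		return n + m + int(l), nil
	default:
		return 0, fmt.Errorf("unhandled wire type %d", key&0x7)
	}
}

func main() {
	// field 9 (unknown), length-delimited "hi": key 0x4a, length 2
	n, err := skipField([]byte{0x4a, 0x02, 'h', 'i', 0x08, 0x01})
	fmt.Println(n, err) // 4 <nil>
}
```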
break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CacheOpt == nil { + m.CacheOpt = &CacheOpt{} + } + if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CopyOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CopyOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CopyOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Src = append(m.Src, &CopySource{}) + if err := m.Src[len(m.Src)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CopySource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CopySource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CopySource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
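The inner loop above decodes SourceOp.Attrs map entries. On the wire a protobuf map is just a repeated nested message whose field 1 is the key and field 2 is the value, which is also what the mapSize arithmetic in the marshalers earlier accounts for. Building one such entry by hand (the helper is illustrative and assumes single-byte lengths):

```go
package main

import "fmt"

// appendString writes a length-delimited string field: key, length, bytes.
// Sketch only: lengths above 127 would need a multi-byte varint.
func appendString(b []byte, field byte, s string) []byte {
	b = append(b, field<<3|2, byte(len(s)))
	return append(b, s...)
}

func main() {
	entry := appendString(nil, 1, "os")     // map key   -> field 1
	entry = appendString(entry, 2, "linux") // map value -> field 2
	// Attrs is field 2 of SourceOp; each entry is one nested message.
	msg := append([]byte{2<<3 | 2, byte(len(entry))}, entry...)
	fmt.Printf("% x\n", msg) // 12 0b 0a 02 6f 73 12 05 6c 69 6e 75 78
}
```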
skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) + } + m.Builder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Builder |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Inputs == nil { + m.Inputs = make(map[string]*BuildInput) + } + var mapkey string + var mapvalue *BuildInput + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildInput{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := 
skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Inputs[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreCache = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerConstraint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkerConstraint == nil { + m.WorkerConstraint = &WorkerConstraint{} + } + if err := m.WorkerConstraint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportCache == nil { + m.ExportCache = &ExportCache{} + } + if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportCache) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyEnv) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpsProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.FtpProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NoProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Definition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Definition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) + copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) + } + var mapkey github_com_opencontainers_go_digest.Digest + mapvalue := &OpMetadata{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &OpMetadata{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOps(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthOps + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipOps(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthOps = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("ops.proto", fileDescriptorOps) } + +var fileDescriptorOps = []byte{ + // 1062 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xae, 0x3f, 0xb2, 0x7b, 0x36, 0xed, 0xdf, 0xff, 0x21, 0x2a, 0xc6, 0x94, 0xc4, 0x6c, + 0x11, 0x72, 0xd3, 0xc6, 0x91, 0x8c, 0x84, 0x2a, 0x2e, 0x2a, 0xe2, 0x0f, 0x14, 0x83, 0x42, 0xaa, + 0x49, 0x04, 0x97, 0x91, 0xbd, 0x1e, 0x3b, 0xab, 0x3a, 0x3b, 0xab, 0xdd, 0xd9, 0x24, 0xbe, 0x00, + 0x89, 0x3e, 0x01, 0x12, 0x4f, 0xc1, 0x43, 0xc0, 0x75, 0x2f, 0xb9, 0x85, 0x8b, 0x82, 0xc2, 0x8b, + 0xa0, 0x73, 0x66, 0xbc, 0xeb, 0x86, 0x22, 0xb5, 0x82, 0x2b, 0xcf, 0x9c, 0xf3, 0x3b, 0x67, 0xce, + 0xf9, 0x9d, 0x8f, 0x35, 0xb8, 0x32, 0x4e, 0xdb, 0x71, 0x22, 0x95, 0x64, 0x76, 0x3c, 0x6e, 0xec, + 0xce, 0x42, 0x75, 0x96, 0x8d, 0xdb, 0x81, 0x3c, 0xdf, 0x9b, 0xc9, 0x99, 0xdc, 0x23, 0xd5, 0x38, + 0x9b, 0xd2, 0x8d, 0x2e, 0x74, 0xd2, 0x26, 0xfe, 0xcf, 0x16, 0xd8, 0x47, 0x31, 0x7b, 0x1f, 0xaa, + 0x61, 0x14, 0x67, 0x2a, 0xad, 0x5b, 0xcd, 0x52, 0xcb, 0xeb, 0xb8, 0xed, 0x78, 0xdc, 0x1e, 0xa2, + 0x84, 0x1b, 0x05, 0x6b, 0x42, 0x59, 0x5c, 0x89, 0xa0, 0x6e, 0x37, 0xad, 0x96, 0xd7, 0x01, 0x04, + 0x0c, 0xae, 0x44, 0x70, 0x14, 0x1f, 0xac, 0x71, 0xd2, 0xb0, 0x0f, 0xa1, 0x9a, 0xca, 0x2c, 0x09, + 0x44, 0xbd, 0x44, 0x98, 0x0d, 0xc4, 0x1c, 0x93, 0x84, 0x50, 0x46, 0x8b, 0x9e, 0x02, 0x19, 0x2f, + 0xea, 0xe5, 0xc2, 0x53, 0x4f, 0xc6, 0x0b, 0xed, 0x09, 0x35, 0xec, 0x1e, 0x54, 0xc6, 0x59, 0x38, + 0x9f, 0xd4, 0x2b, 0x04, 0xf1, 0x10, 0xd2, 0x45, 0x01, 0x61, 0xb4, 0xae, 0x5b, 0x06, 0x5b, 0xc6, + 0xfe, 0xb7, 0x50, 0xa1, 0x38, 0xd9, 0xe7, 0x50, 0x9d, 0x84, 0x33, 0x91, 0xaa, 0xba, 0xd5, 0xb4, + 0x5a, 0x6e, 0xb7, 0xf3, 0xfc, 0xc5, 0xf6, 0xda, 0x6f, 0x2f, 0xb6, 0x77, 0x56, 0x08, 0x91, 0xb1, + 
0x88, 0x02, 0x19, 0xa9, 0x51, 0x18, 0x89, 0x24, 0xdd, 0x9b, 0xc9, 0x5d, 0x6d, 0xd2, 0xee, 0xd3, + 0x0f, 0x37, 0x1e, 0xd8, 0x7d, 0xa8, 0x84, 0xd1, 0x44, 0x5c, 0x51, 0xb2, 0xa5, 0xee, 0x5b, 0xc6, + 0x95, 0x77, 0x94, 0xa9, 0x38, 0x53, 0x43, 0x54, 0x71, 0x8d, 0xf0, 0x87, 0x50, 0xd5, 0x34, 0xb0, + 0xbb, 0x50, 0x3e, 0x17, 0x6a, 0x44, 0xcf, 0x7b, 0x1d, 0x07, 0x63, 0x3e, 0x14, 0x6a, 0xc4, 0x49, + 0x8a, 0x0c, 0x9f, 0xcb, 0x2c, 0x52, 0x69, 0xdd, 0x2e, 0x18, 0x3e, 0x44, 0x09, 0x37, 0x0a, 0xff, + 0x1b, 0x28, 0xa3, 0x01, 0x63, 0x50, 0x1e, 0x25, 0x33, 0x5d, 0x0a, 0x97, 0xd3, 0x99, 0xd5, 0xa0, + 0x24, 0xa2, 0x0b, 0xb2, 0x75, 0x39, 0x1e, 0x51, 0x12, 0x5c, 0x4e, 0x88, 0x6a, 0x97, 0xe3, 0x11, + 0xed, 0xb2, 0x54, 0x24, 0xc4, 0xab, 0xcb, 0xe9, 0xcc, 0xee, 0x83, 0x1b, 0x27, 0xf2, 0x6a, 0x71, + 0x8a, 0xd6, 0x95, 0xa2, 0x2c, 0x4f, 0x50, 0x38, 0x88, 0x2e, 0xb8, 0x13, 0x9b, 0x93, 0xff, 0x9d, + 0x0d, 0x15, 0x0a, 0x88, 0xb5, 0x30, 0xfd, 0x38, 0xd3, 0x4c, 0x96, 0xba, 0xcc, 0xa4, 0x0f, 0x44, + 0x74, 0x9e, 0x3d, 0x92, 0xde, 0x00, 0x27, 0x15, 0x73, 0x11, 0x28, 0x99, 0x10, 0x57, 0x2e, 0xcf, + 0xef, 0x18, 0xce, 0x04, 0xcb, 0xa1, 0x23, 0xa4, 0x33, 0x7b, 0x00, 0x55, 0x49, 0x1c, 0x52, 0x90, + 0xff, 0xc0, 0xac, 0x81, 0xa0, 0xf3, 0x44, 0x8c, 0x26, 0x32, 0x9a, 0x2f, 0x28, 0x74, 0x87, 0xe7, + 0x77, 0xf6, 0x00, 0x5c, 0x62, 0xed, 0x64, 0x11, 0x8b, 0x7a, 0xb5, 0x69, 0xb5, 0x6e, 0x77, 0x6e, + 0xe5, 0x8c, 0xa2, 0x90, 0x17, 0x7a, 0xd6, 0x02, 0x27, 0x18, 0x05, 0x67, 0xe2, 0x28, 0x56, 0xf5, + 0xcd, 0x82, 0x83, 0x9e, 0x91, 0xf1, 0x5c, 0xeb, 0x37, 0xc0, 0x59, 0x4a, 0xd9, 0x6d, 0xb0, 0x87, + 0x7d, 0xdd, 0x4c, 0xdc, 0x1e, 0xf6, 0xfd, 0xc7, 0x50, 0xd5, 0x6d, 0xca, 0x9a, 0x50, 0x4a, 0x93, + 0xc0, 0x8c, 0xca, 0xed, 0x65, 0xff, 0xea, 0x4e, 0xe7, 0xa8, 0xca, 0x73, 0xb7, 0x8b, 0xdc, 0x7d, + 0x0e, 0x50, 0xc0, 0xfe, 0x1b, 0x8e, 0xfd, 0x1f, 0x2c, 0x70, 0x96, 0x13, 0xc6, 0xb6, 0x00, 0xc2, + 0x89, 0x88, 0x54, 0x38, 0x0d, 0x45, 0x62, 0x02, 0x5f, 0x91, 0xb0, 0x5d, 0xa8, 0x8c, 0x94, 0x4a, + 0x96, 0x1d, 0xf8, 0xf6, 0xea, 0x78, 0xb6, 0xf7, 0x51, 0x33, 0x88, 0x54, 0xb2, 0xe0, 0x1a, 0xd5, + 0x78, 0x04, 0x50, 0x08, 0xb1, 0xdd, 0x9e, 0x8a, 0x85, 0xf1, 0x8a, 0x47, 0xb6, 0x09, 0x95, 0x8b, + 0xd1, 0x3c, 0x13, 0x26, 0x28, 0x7d, 0xf9, 0xc4, 0x7e, 0x64, 0xf9, 0x3f, 0xd9, 0xb0, 0x6e, 0xc6, + 0x95, 0x3d, 0x84, 0x75, 0x1a, 0x57, 0x13, 0xd1, 0xab, 0x33, 0x5d, 0x42, 0xd8, 0x5e, 0xbe, 0x87, + 0x56, 0x62, 0x34, 0xae, 0xf4, 0x3e, 0x32, 0x31, 0x16, 0x5b, 0xa9, 0x34, 0x11, 0x53, 0xb3, 0x70, + 0xa8, 0x14, 0x7d, 0x31, 0x0d, 0xa3, 0x50, 0x85, 0x32, 0xe2, 0xa8, 0x62, 0x0f, 0x97, 0x59, 0x97, + 0xc9, 0xe3, 0x9d, 0x55, 0x8f, 0x7f, 0x4f, 0x7a, 0x08, 0xde, 0xca, 0x33, 0xaf, 0xc8, 0xfa, 0x83, + 0xd5, 0xac, 0xcd, 0x93, 0xe4, 0x4e, 0x6f, 0xcb, 0x82, 0x85, 0x7f, 0xc1, 0xdf, 0xc7, 0x00, 0x85, + 0xcb, 0xd7, 0xef, 0x14, 0xff, 0x47, 0x1b, 0xe0, 0x28, 0xc6, 0x1d, 0x32, 0x19, 0xd1, 0xca, 0xd9, + 0x08, 0x67, 0x91, 0x4c, 0xc4, 0x29, 0xf5, 0x37, 0xd9, 0x3b, 0xdc, 0xd3, 0x32, 0x6a, 0x73, 0xb6, + 0x0f, 0xde, 0x44, 0xa4, 0x41, 0x12, 0xc6, 0x48, 0x98, 0x21, 0x7d, 0x1b, 0x73, 0x2a, 0xfc, 0xb4, + 0xfb, 0x05, 0x42, 0x73, 0xb5, 0x6a, 0xc3, 0xf6, 0xe1, 0xff, 0x97, 0x32, 0x79, 0x2a, 0x92, 0xd3, + 0x40, 0x46, 0xa9, 0x4a, 0x46, 0x61, 0xa4, 0x4c, 0x3d, 0x36, 0xd1, 0xd1, 0xd7, 0xa4, 0xec, 0xe5, + 0x3a, 0x5e, 0xbb, 0xbc, 0x21, 0x61, 0x1d, 0xd8, 0x10, 0x57, 0xb1, 0x4c, 0x94, 0x09, 0x54, 0x7f, + 0x18, 0xfe, 0xa7, 0x3f, 0x31, 0x28, 0xa7, 0x60, 0xb9, 0x27, 0x8a, 0x4b, 0xe3, 0x31, 0xd4, 0x6e, + 0xc6, 0xf5, 0x46, 0x1c, 0xdf, 0x03, 0x6f, 0xc5, 0x37, 0x02, 0xbf, 0x22, 0xa0, 0x26, 0x49, 0x5f, + 0xfc, 0x67, 0x16, 0x38, 
0xcb, 0x4d, 0xc9, 0xde, 0x03, 0x38, 0x53, 0x2a, 0x3e, 0xa5, 0x85, 0x69, + 0x1e, 0x71, 0x51, 0x42, 0x08, 0xb6, 0x0d, 0x1e, 0x5e, 0x52, 0xa3, 0xd7, 0x0f, 0x92, 0x45, 0xaa, + 0x01, 0xef, 0x82, 0x3b, 0xcd, 0xcd, 0xf5, 0x52, 0x74, 0xa6, 0x4b, 0xeb, 0x77, 0xc0, 0x89, 0xa4, + 0xd1, 0xe9, 0xfd, 0xbd, 0x1e, 0x49, 0x52, 0xf9, 0x3b, 0x50, 0xbb, 0xc9, 0x21, 0xbb, 0x03, 0xd5, + 0x69, 0x38, 0x57, 0x34, 0x54, 0xf8, 0x45, 0x30, 0x37, 0xff, 0x57, 0x0b, 0xa0, 0x18, 0x00, 0x24, + 0x04, 0xa7, 0x03, 0x31, 0x1b, 0x7a, 0x1a, 0xe6, 0xe0, 0x9c, 0x9b, 0xba, 0x9a, 0x6a, 0xdf, 0x7d, + 0x79, 0x68, 0xda, 0xcb, 0xb2, 0x13, 0xa5, 0xfa, 0x2b, 0xfa, 0xec, 0xf7, 0x37, 0xfa, 0x8a, 0xe6, + 0x2f, 0x34, 0xbe, 0x80, 0x5b, 0x2f, 0xb9, 0x7b, 0xcd, 0x79, 0x2a, 0x7a, 0x6f, 0xa5, 0x62, 0x3b, + 0x9f, 0x82, 0x9b, 0x6f, 0x77, 0xe6, 0x40, 0xb9, 0x3b, 0xfc, 0xb2, 0x5f, 0x5b, 0x63, 0x00, 0xd5, + 0xe3, 0x41, 0x8f, 0x0f, 0x4e, 0x6a, 0x16, 0x5b, 0x87, 0xd2, 0xf1, 0xf1, 0x41, 0xcd, 0x66, 0x2e, + 0x54, 0x7a, 0xfb, 0xbd, 0x83, 0x41, 0xad, 0x84, 0xc7, 0x93, 0xc3, 0x27, 0x9f, 0x1d, 0xd7, 0xca, + 0xdd, 0xda, 0xf3, 0xeb, 0x2d, 0xeb, 0x97, 0xeb, 0x2d, 0xeb, 0x8f, 0xeb, 0x2d, 0xeb, 0xfb, 0x3f, + 0xb7, 0xd6, 0xc6, 0x55, 0xfa, 0x17, 0xf4, 0xd1, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x76, + 0x25, 0x54, 0x45, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto new file mode 100644 index 0000000000..8b7af2d404 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +// Package pb provides the protobuf definition of LLB: low-level builder instruction. +// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. +package pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +// Op represents a vertex of the LLB DAG. +message Op { + // inputs is a set of input edges. + repeated Input inputs = 1; + oneof op { + ExecOp exec = 2; + SourceOp source = 3; + CopyOp copy = 4; + BuildOp build = 5; + } +} + +// Input represents an input edge for an Op. +message Input { + // digest of the marshaled input Op + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + // output index of the input Op + int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; +} + +// ExecOp executes a command in a container. +message ExecOp { + Meta meta = 1; + repeated Mount mounts = 2; +} + +// Meta is a set of arguments for ExecOp. +// Meta is unrelated to LLB metadata. +// FIXME: rename (ExecContext? ExecArgs?) +message Meta { + repeated string args = 1; + repeated string env = 2; + string cwd = 3; + string user = 4; + ProxyEnv proxy_env = 5; +} + +// Mount specifies how to mount an input Op as a filesystem. +message Mount { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + string selector = 2; + string dest = 3; + int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; + bool readonly = 5; + MountType mountType = 6; + CacheOpt cacheOpt = 20; +} + +enum MountType { + BIND = 0; + SECRET = 1; + SSH = 2; + CACHE = 3; + TMPFS = 4; +} + +message CacheOpt { + string ID = 1; +} + +// CopyOp copies files across Ops. +message CopyOp { + repeated CopySource src = 1; + string dest = 2; +} + +// CopySource specifies a source for CopyOp. 
+message CopySource { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + string selector = 2; +} + +// SourceOp specifies a source such as build contexts and images. +message SourceOp { + // TODO: use source type or any type instead of URL protocol. + // identifier e.g. local://, docker-image://, git://, https://... + string identifier = 1; + // attrs are defined in attr.go + map<string, string> attrs = 2; +} + +// BuildOp is used for nested build invocation. +message BuildOp { + int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + map<string, BuildInput> inputs = 2; + Definition def = 3; + map<string, string> attrs = 4; + // outputs +} + +// BuildInput is used for BuildOp. +message BuildInput { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} + +// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and is overridable at run time. +message OpMetadata { + // ignore_cache specifies to ignore the cache for this Op. + bool ignore_cache = 1; + // Description can be used for keeping any text fields that the builder doesn't parse + map<string, string> description = 2; + WorkerConstraint worker_constraint = 3; + ExportCache export_cache = 4; +} + +message ExportCache { + bool Value = 1; +} + +message ProxyEnv { + string http_proxy = 1; + string https_proxy = 2; + string ftp_proxy = 3; + string no_proxy = 4; +} + +// WorkerConstraint is experimental and likely to be changed. +message WorkerConstraint { + repeated string filter = 1; // containerd-style filter +} + +// Definition is the LLB definition structure with per-vertex metadata entries +message Definition { + // def is a list of marshaled Op messages + repeated bytes def = 1; + // metadata contains metadata for each of the Op messages. + // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. + map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go new file mode 100644 index 0000000000..14f3f5e019 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/progress.go @@ -0,0 +1,109 @@ +package solver + +import ( + "context" + "io" + "time" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { + vs := &vertexStream{cache: map[digest.Digest]*client.Vertex{}} + pr := j.pr.Reader(ctx) + defer func() { + if enc := vs.encore(); len(enc) > 0 { + ch <- &client.SolveStatus{Vertexes: enc} + } + close(ch) + }() + + for { + p, err := pr.Read(ctx) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + ss := &client.SolveStatus{} + for _, p := range p { + switch v := p.Sys.(type) { + case client.Vertex: + ss.Vertexes = append(ss.Vertexes, vs.append(v)...)
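+// NOTE (illustrative comment, not in the upstream file): each record read from
+// the progress reader maps onto one of the three SolveStatus slices:
+//   client.Vertex    -> ss.Vertexes (handled above, deduplicated by vertexStream)
+//   progress.Status  -> ss.Statuses (per-vertex action progress, handled below)
+//   client.VertexLog -> ss.Logs     (raw build output lines, handled below)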
+ + case progress.Status: + vtx, ok := p.Meta("vertex") + if !ok { + logrus.Warnf("progress %s status without vertex info", p.ID) + continue + } + vs := &client.VertexStatus{ + ID: p.ID, + Vertex: vtx.(digest.Digest), + Name: v.Action, + Total: int64(v.Total), + Current: int64(v.Current), + Timestamp: p.Timestamp, + Started: v.Started, + Completed: v.Completed, + } + ss.Statuses = append(ss.Statuses, vs) + case client.VertexLog: + vtx, ok := p.Meta("vertex") + if !ok { + logrus.Warnf("progress %s log without vertex info", p.ID) + continue + } + v.Vertex = vtx.(digest.Digest) + v.Timestamp = p.Timestamp + ss.Logs = append(ss.Logs, &v) + } + } + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- ss: + } + } +} + +type vertexStream struct { + cache map[digest.Digest]*client.Vertex +} + +func (vs *vertexStream) append(v client.Vertex) []*client.Vertex { + var out []*client.Vertex + vs.cache[v.Digest] = &v + if v.Started != nil { + for _, inp := range v.Inputs { + if inpv, ok := vs.cache[inp]; ok { + if !inpv.Cached && inpv.Completed == nil { + inpv.Cached = true + inpv.Started = v.Started + inpv.Completed = v.Started + out = append(out, vs.append(*inpv)...) + delete(vs.cache, inp) + } + } + } + } + vcopy := v + return append(out, &vcopy) +} + +func (vs *vertexStream) encore() []*client.Vertex { + var out []*client.Vertex + for _, v := range vs.cache { + if v.Started != nil && v.Completed == nil { + now := time.Now() + v.Completed = &now + v.Error = context.Canceled.Error() + out = append(out, v) + } + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go new file mode 100644 index 0000000000..641cf14f03 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/result.go @@ -0,0 +1,105 @@ +package solver + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SharedResult is a result that can be cloned +type SharedResult struct { + mu sync.Mutex + main Result +} + +func NewSharedResult(main Result) *SharedResult { + return &SharedResult{main: main} +} + +func (r *SharedResult) Clone() Result { + r.mu.Lock() + defer r.mu.Unlock() + + r1, r2 := dup(r.main) + r.main = r1 + return r2 +} + +func (r *SharedResult) Release(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + return r.main.Release(ctx) +} + +func dup(res Result) (Result, Result) { + sem := int64(0) + return &splitResult{Result: res, sem: &sem}, &splitResult{Result: res, sem: &sem} +} + +type splitResult struct { + Result + released int64 + sem *int64 +} + +func (r *splitResult) Release(ctx context.Context) error { + if atomic.AddInt64(&r.released, 1) > 1 { + err := errors.Errorf("releasing already released reference") + logrus.Error(err) + return err + } + if atomic.AddInt64(r.sem, 1) == 2 { + return r.Result.Release(ctx) + } + return nil +} + +// NewCachedResult combines a result and cache key into cached result +func NewCachedResult(res Result, k ExportableCacheKey) CachedResult { + return &cachedResult{res, k} +} + +type cachedResult struct { + Result + k ExportableCacheKey +} + +func (cr *cachedResult) CacheKey() ExportableCacheKey { + return cr.k +} + +func NewSharedCachedResult(res CachedResult) *SharedCachedResult { + return &SharedCachedResult{ + SharedResult: NewSharedResult(res), + CachedResult: res, + } +} + +func (r *SharedCachedResult) Clone() CachedResult { + return &clonedCachedResult{Result: r.SharedResult.Clone(), cr: r.CachedResult} +} + +func (r 
*SharedCachedResult) Release(ctx context.Context) error { + return r.SharedResult.Release(ctx) +} + +type clonedCachedResult struct { + Result + cr CachedResult +} + +func (r *clonedCachedResult) ID() string { + return r.Result.ID() +} + +func (cr *clonedCachedResult) CacheKey() ExportableCacheKey { + return cr.cr.CacheKey() +} + +type SharedCachedResult struct { + *SharedResult + CachedResult +} diff --git a/vendor/github.com/moby/buildkit/solver/scheduler.go b/vendor/github.com/moby/buildkit/solver/scheduler.go new file mode 100644 index 0000000000..7336f619e0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/scheduler.go @@ -0,0 +1,396 @@ +package solver + +import ( + "context" + "sync" + + "github.com/moby/buildkit/solver/internal/pipe" + "github.com/moby/buildkit/util/cond" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const debugScheduler = false // TODO: replace with logs in build trace + +func newScheduler(ef edgeFactory) *scheduler { + s := &scheduler{ + waitq: map[*edge]struct{}{}, + incoming: map[*edge][]*edgePipe{}, + outgoing: map[*edge][]*edgePipe{}, + + stopped: make(chan struct{}), + closed: make(chan struct{}), + + ef: ef, + } + s.cond = cond.NewStatefulCond(&s.mu) + + go s.loop() + + return s +} + +type dispatcher struct { + next *dispatcher + e *edge +} + +type scheduler struct { + cond *cond.StatefulCond + mu sync.Mutex + muQ sync.Mutex + + ef edgeFactory + + waitq map[*edge]struct{} + next *dispatcher + last *dispatcher + stopped chan struct{} + stoppedOnce sync.Once + closed chan struct{} + + incoming map[*edge][]*edgePipe + outgoing map[*edge][]*edgePipe +} + +func (s *scheduler) Stop() { + s.stoppedOnce.Do(func() { + close(s.stopped) + }) + <-s.closed +} + +func (s *scheduler) loop() { + defer func() { + close(s.closed) + }() + + go func() { + <-s.stopped + s.mu.Lock() + s.cond.Signal() + s.mu.Unlock() + }() + + s.mu.Lock() + for { + select { + case <-s.stopped: + s.mu.Unlock() + return + default: + } + s.muQ.Lock() + l := s.next + if l != nil { + if l == s.last { + s.last = nil + } + s.next = l.next + delete(s.waitq, l.e) + } + s.muQ.Unlock() + if l == nil { + s.cond.Wait() + continue + } + s.dispatch(l.e) + } +} + +// dispatch schedules an edge to be processed +func (s *scheduler) dispatch(e *edge) { + inc := make([]pipe.Sender, len(s.incoming[e])) + for i, p := range s.incoming[e] { + inc[i] = p.Sender + } + out := make([]pipe.Receiver, len(s.outgoing[e])) + for i, p := range s.outgoing[e] { + out[i] = p.Receiver + } + + e.hasActiveOutgoing = false + updates := []pipe.Receiver{} + for _, p := range out { + if ok := p.Receive(); ok { + updates = append(updates, p) + } + if !p.Status().Completed { + e.hasActiveOutgoing = true + } + } + + // unpark the edge + debugSchedulerPreUnpark(e, inc, updates, out) + e.unpark(inc, updates, out, &pipeFactory{s: s, e: e}) + debugSchedulerPostUnpark(e, inc) + + // set up new requests that didn't complete/were added by this run + openIncoming := make([]*edgePipe, 0, len(inc)) + for _, r := range s.incoming[e] { + if !r.Sender.Status().Completed { + openIncoming = append(openIncoming, r) + } + } + if len(openIncoming) > 0 { + s.incoming[e] = openIncoming + } else { + delete(s.incoming, e) + } + + openOutgoing := make([]*edgePipe, 0, len(out)) + for _, r := range s.outgoing[e] { + if !r.Receiver.Status().Completed { + openOutgoing = append(openOutgoing, r) + } + } + if len(openOutgoing) > 0 { + s.outgoing[e] = openOutgoing + } else { + delete(s.outgoing, e) + } + + // if keys changed there might be 
a possibility to merge with another edge + if e.keysDidChange { + if k := e.currentIndexKey(); k != nil { + // skip this if not at least 1 key per dep + origEdge := e.index.LoadOrStore(k, e) + if origEdge != nil { + logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name()) + if s.mergeTo(origEdge, e) { + s.ef.setEdge(e.edge, origEdge) + } + } + } + e.keysDidChange = false + } + + // validation to avoid deadlocks/resource leaks: + // TODO: if these start showing up in error reports they can be changed + // to error the edge instead. They can only appear from algorithm bugs in + // unpark(), not for any external input. + if len(openIncoming) > 0 && len(openOutgoing) == 0 { + panic("invalid dispatch: return leaving incoming open") + } + if len(openIncoming) == 0 && len(openOutgoing) > 0 { + panic("invalid dispatch: return leaving outgoing open") + } +} + +// signal notifies that an edge needs to be processed again +func (s *scheduler) signal(e *edge) { + s.muQ.Lock() + if _, ok := s.waitq[e]; !ok { + d := &dispatcher{e: e} + if s.last == nil { + s.next = d + } else { + s.last.next = d + } + s.last = d + s.waitq[e] = struct{}{} + s.cond.Signal() + } + s.muQ.Unlock() +} + +// build evaluates an edge into a result +func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error) { + s.mu.Lock() + e := s.ef.getEdge(edge) + if e == nil { + s.mu.Unlock() + return nil, errors.Errorf("invalid request %v for build", edge) + } + + wait := make(chan struct{}) + + var p *pipe.Pipe + p = s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) + p.OnSendCompletion = func() { + p.Receiver.Receive() + if p.Receiver.Status().Completed { + close(wait) + } + } + s.mu.Unlock() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + <-ctx.Done() + p.Receiver.Cancel() + }() + + <-wait + + if err := p.Receiver.Status().Err; err != nil { + return nil, err + } + return p.Receiver.Status().Value.(*edgeState).result.Clone(), nil +} + +// newPipe creates a new request pipe between two edges +func (s *scheduler) newPipe(target, from *edge, req pipe.Request) *pipe.Pipe { + p := &edgePipe{ + Pipe: pipe.New(req), + Target: target, + From: from, + } + + s.signal(target) + if from != nil { + p.OnSendCompletion = func() { + p.mu.Lock() + defer p.mu.Unlock() + s.signal(p.From) + } + s.outgoing[from] = append(s.outgoing[from], p) + } + s.incoming[target] = append(s.incoming[target], p) + p.OnReceiveCompletion = func() { + p.mu.Lock() + defer p.mu.Unlock() + s.signal(p.Target) + } + return p.Pipe +} + +// newRequestWithFunc creates a new request pipe that invokes an async function +func (s *scheduler) newRequestWithFunc(e *edge, f func(context.Context) (interface{}, error)) pipe.Receiver { + pp, start := pipe.NewWithFunction(f) + p := &edgePipe{ + Pipe: pp, + From: e, + } + p.OnSendCompletion = func() { + p.mu.Lock() + defer p.mu.Unlock() + s.signal(p.From) + } + s.outgoing[e] = append(s.outgoing[e], p) + go start() + return p.Receiver +} + +// mergeTo merges the state from one edge to another. The source edge is discarded.
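+// NOTE (illustrative comment, not in the upstream file): dispatch calls mergeTo
+// when two edges produce the same index key, e.g. identical vertices reached
+// through different build requests. All of src's incoming and outgoing pipes
+// are re-attached to target, and setEdge re-points future edge lookups.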
+func (s *scheduler) mergeTo(target, src *edge) bool { + if !target.edge.Vertex.Options().IgnoreCache && src.edge.Vertex.Options().IgnoreCache { + return false + } + for _, inc := range s.incoming[src] { + inc.mu.Lock() + inc.Target = target + s.incoming[target] = append(s.incoming[target], inc) + inc.mu.Unlock() + } + + for _, out := range s.outgoing[src] { + out.mu.Lock() + out.From = target + s.outgoing[target] = append(s.outgoing[target], out) + out.mu.Unlock() + out.Receiver.Cancel() + } + + delete(s.incoming, src) + delete(s.outgoing, src) + s.signal(target) + + for i, d := range src.deps { + for _, k := range d.keys { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: k, Selector: src.cacheMap.Deps[i].Selector}}) + } + if d.slowCacheKey != nil { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: *d.slowCacheKey}}) + } + if d.result != nil { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: d.result.CacheKey(), Selector: src.cacheMap.Deps[i].Selector}}) + } + } + + // TODO(tonistiigi): merge cache providers + + return true +} + +// edgeFactory allows access to the edges from a shared graph +type edgeFactory interface { + getEdge(Edge) *edge + setEdge(Edge, *edge) +} + +type pipeFactory struct { + e *edge + s *scheduler +} + +func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipe.Receiver { + target := pf.s.ef.getEdge(ee) + if target == nil { + panic("failed to get edge") // TODO: return errored pipe + } + p := pf.s.newPipe(target, pf.e, pipe.Request{Payload: req}) + if debugScheduler { + logrus.Debugf("> newPipe %s %p desiredState=%s", ee.Vertex.Name(), p, req.desiredState) + } + return p.Receiver +} + +func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, error)) pipe.Receiver { + p := pf.s.newRequestWithFunc(pf.e, f) + if debugScheduler { + logrus.Debugf("> newFunc %p", p) + } + return p +} + +func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pipe.Receiver) { + if !debugScheduler { + return + } + logrus.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest()) + + for i, dep := range e.deps { + des := edgeStatusInitial + if dep.req != nil { + des = dep.req.Request().(*edgeRequest).desiredState + } + logrus.Debugf(":: dep%d %s state=%s des=%s keys=%s hasslowcache=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil) + } + + for i, in := range inc { + req := in.Request() + logrus.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled) + } + + for i, up := range updates { + if up == e.cacheMapReq { + logrus.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed) + } else if up == e.execReq { + logrus.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed) + } else { + st, ok := up.Status().Value.(*edgeState) + if ok { + index := -1 + if dep, ok := e.depRequests[up]; ok { + index = int(dep.index) + } + logrus.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state) + } else { + logrus.Debugf("> update-%d: unknown", i) + } + } + } +} + +func debugSchedulerPostUnpark(e *edge, inc []pipe.Sender) { + if !debugScheduler { + return + } + for i, in := range inc { + logrus.Debugf("< incoming-%d: 
%p completed=%v", i, in, in.Status().Completed) + } + logrus.Debugf("<< unpark %s\n", e.edge.Vertex.Name()) +} diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go new file mode 100644 index 0000000000..5de320b40c --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -0,0 +1,168 @@ +package solver + +import ( + "context" + "time" + + "github.com/containerd/containerd/content" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Vertex is one node in the build graph +type Vertex interface { + // Digest is a content-addressable vertex identifier + Digest() digest.Digest + // Sys returns an internal value that is used to execute the vertex. Usually + // this is capured by the operation resolver method during solve. + Sys() interface{} + Options() VertexOptions + // Array of edges current vertex depends on. + Inputs() []Edge + Name() string +} + +// Index is a index value for output edge +type Index int + +// Edge is a path to a specific output of the vertex +type Edge struct { + Index Index + Vertex Vertex +} + +// VertexOptions has optional metadata for the vertex that is not contained in digest +type VertexOptions struct { + IgnoreCache bool + CacheSources []CacheManager + Description map[string]string // text values with no special meaning for solver + ExportCache *bool + // WorkerConstraint +} + +// Result is an abstract return value for a solve +type Result interface { + ID() string + Release(context.Context) error + Sys() interface{} +} + +// CachedResult is a result connected with its cache key +type CachedResult interface { + Result + CacheKey() ExportableCacheKey +} + +// CacheExportMode is the type for setting cache exporting modes +type CacheExportMode int + +const ( + // CacheExportModeMin exports a topmost allowed vertex and its dependencies + // that already have transferable layers + CacheExportModeMin CacheExportMode = iota + // CacheExportModeMax exports all possible non-root vertexes + CacheExportModeMax + // CacheExportModeRemoteOnly only exports vertexes that already have + // transferable layers + CacheExportModeRemoteOnly +) + +// CacheExportOpt defines options for exporting build cache +type CacheExportOpt struct { + // Convert can convert a build result to transferable object + Convert func(context.Context, Result) (*Remote, error) + // Mode defines a cache export algorithm + Mode CacheExportMode +} + +// CacheExporter can export the artifacts of the build chain +type CacheExporter interface { + ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) +} + +// CacheExporterTarget defines object capable of receiving exports +type CacheExporterTarget interface { + Add(dgst digest.Digest) CacheExporterRecord + Visit(interface{}) + Visited(interface{}) bool +} + +// CacheExporterRecord is a single object being exported +type CacheExporterRecord interface { + AddResult(createdAt time.Time, result *Remote) + LinkFrom(src CacheExporterRecord, index int, selector string) +} + +// Remote is a descriptor or a list of stacked descriptors that can be pulled +// from a content provider +// TODO: add closer to keep referenced data from getting deleted +type Remote struct { + Descriptors []ocispec.Descriptor + Provider content.Provider +} + +// CacheLink is a link between two cache records +type CacheLink struct { + Source digest.Digest `json:",omitempty"` + Input Index `json:",omitempty"` + Output 
Index `json:",omitempty"` + Base digest.Digest `json:",omitempty"` + Selector digest.Digest `json:",omitempty"` +} + +// Op is an implementation for running a vertex +type Op interface { + // CacheMap returns structure describing how the operation is cached. + // Currently only roots are allowed to return multiple cache maps per op. + CacheMap(context.Context, int) (*CacheMap, bool, error) + // Exec runs an operation given results from previous operations. + Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) +} + +type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error) + +type CacheMap struct { + // Digest is a base digest for operation that needs to be combined with + // inputs cache or selectors for dependencies. + Digest digest.Digest + Deps []struct { + // Optional digest that is merged with the cache key of the input + Selector digest.Digest + // Optional function that returns a digest for the input based on its + // return value + ComputeDigestFunc ResultBasedCacheFunc + } +} + +// ExportableCacheKey is a cache key connected with an exporter that can export +// a chain of cacherecords pointing to that key +type ExportableCacheKey struct { + *CacheKey + Exporter CacheExporter +} + +// CacheRecord is an identifier for loading in cache +type CacheRecord struct { + ID string + Size int + CreatedAt time.Time + Priority int + + cacheManager *cacheManager + key *CacheKey +} + +// CacheManager implements build cache backend +type CacheManager interface { + // ID is used to identify cache providers that are backed by same source + // to avoid duplicate calls to the same provider + ID() string + // Query searches for cache paths from one cache key to the output of a + // possible match. + Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) + Records(ck *CacheKey) ([]*CacheRecord, error) + // Load pulls and returns the cached result + Load(ctx context.Context, rec *CacheRecord) (Result, error) + // Save saves a result based on a cache key + Save(key *CacheKey, s Result) (*ExportableCacheKey, error) +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go new file mode 100644 index 0000000000..e9b1e8c1af --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -0,0 +1,406 @@ +package git + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/progress/logs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`) + +type Opt struct { + CacheAccessor cache.Accessor + MetadataStore *metadata.Store +} + +type gitSource struct { + md *metadata.Store + cache cache.Accessor + locker *locker.Locker +} + +func NewSource(opt Opt) (source.Source, error) { + gs := &gitSource{ + md: opt.MetadataStore, + cache: opt.CacheAccessor, + locker: locker.New(), + } + + if err := exec.Command("git", "version").Run(); err != nil { + return nil, errors.Wrap(err, "failed to find git binary") + } + + return gs, nil +} + +func (gs *gitSource) ID() string { + return source.GitScheme +} + +// needs to be called with repo lock +func 
(gs *gitSource) mountRemote(ctx context.Context, remote string) (target string, release func(), retErr error) { + remoteKey := "git-remote::" + remote + + sis, err := gs.md.Search(remoteKey) + if err != nil { + return "", nil, errors.Wrapf(err, "failed to search metadata for %s", remote) + } + + var remoteRef cache.MutableRef + for _, si := range sis { + remoteRef, err = gs.cache.GetMutable(ctx, si.ID()) + if err != nil { + if cache.IsLocked(err) { + // should never really happen as no other function should access this metadata, but let's be graceful + logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err) + continue + } + return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", remote) + } + break + } + + initializeRepo := false + if remoteRef == nil { + remoteRef, err = gs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote))) + if err != nil { + return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote) + } + initializeRepo = true + } + + releaseRemoteRef := func() { + remoteRef.Release(context.TODO()) + } + + defer func() { + if retErr != nil && remoteRef != nil { + releaseRemoteRef() + } + }() + + mount, err := remoteRef.Mount(ctx, false) + if err != nil { + return "", nil, err + } + + lm := snapshot.LocalMounter(mount) + dir, err := lm.Mount() + if err != nil { + return "", nil, err + } + + defer func() { + if retErr != nil { + lm.Unmount() + } + }() + + if initializeRepo { + if _, err := gitWithinDir(ctx, dir, "", "init", "--bare"); err != nil { + return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir) + } + + if _, err := gitWithinDir(ctx, dir, "", "remote", "add", "origin", remote); err != nil { + return "", nil, errors.Wrapf(err, "failed to add origin repo at %s", dir) + } + + // save new remote metadata + si, _ := gs.md.Get(remoteRef.ID()) + v, err := metadata.NewValue(remoteKey) + if err != nil { + return "", nil, err + } + v.Index = remoteKey + + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, "git-remote", v) + }); err != nil { + return "", nil, err + } + } + return dir, func() { + lm.Unmount() + releaseRemoteRef() + }, nil +} + +type gitSourceHandler struct { + *gitSource + src source.GitIdentifier + cacheKey string +} + +func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { + gitIdentifier, ok := id.(*source.GitIdentifier) + if !ok { + return nil, errors.Errorf("invalid git identifier %v", id) + } + + return &gitSourceHandler{ + src: *gitIdentifier, + gitSource: gs, + }, nil +} + +func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { + remote := gs.src.Remote + ref := gs.src.Ref + if ref == "" { + ref = "master" + } + gs.locker.Lock(remote) + defer gs.locker.Unlock(remote) + + if isCommitSHA(ref) { + gs.cacheKey = ref + return ref, true, nil + } + + gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote) + if err != nil { + return "", false, err + } + defer unmountGitDir() + + // TODO: should we assume that remote tag is immutable? add a timer?
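+// NOTE (illustrative comment, not in the upstream file): for a mutable ref
+// such as "master", `git ls-remote origin master` prints a line like
+// "<40-hex-sha>\trefs/heads/master"; the code below takes the SHA before the
+// first tab as the content-addressed cache key.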
+ + buf, err := gitWithinDir(ctx, gitDir, "", "ls-remote", "origin", ref) + if err != nil { + return "", false, errors.Wrapf(err, "failed to fetch remote %s", remote) + } + out := buf.String() + idx := strings.Index(out, "\t") + if idx == -1 { + return "", false, errors.Errorf("failed to find commit SHA from output: %s", string(out)) + } + + sha := string(out[:idx]) + if !isCommitSHA(sha) { + return "", false, errors.Errorf("invalid commit sha %q", sha) + } + gs.cacheKey = sha + return sha, true, nil +} + +func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { + ref := gs.src.Ref + if ref == "" { + ref = "master" + } + + cacheKey := gs.cacheKey + if cacheKey == "" { + var err error + cacheKey, _, err = gs.CacheKey(ctx, 0) + if err != nil { + return nil, err + } + } + + snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir + gs.locker.Lock(snapshotKey) + defer gs.locker.Unlock(snapshotKey) + + sis, err := gs.md.Search(snapshotKey) + if err != nil { + return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey) + } + if len(sis) > 0 { + return gs.cache.Get(ctx, sis[0].ID()) + } + + gs.locker.Lock(gs.src.Remote) + defer gs.locker.Unlock(gs.src.Remote) + gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote) + if err != nil { + return nil, err + } + defer unmountGitDir() + + doFetch := true + if isCommitSHA(ref) { + // skip fetch if commit already exists + if _, err := gitWithinDir(ctx, gitDir, "", "cat-file", "-e", ref+"^{commit}"); err == nil { + doFetch = false + } + } + + if doFetch { + args := []string{"fetch"} + if !isCommitSHA(ref) { // TODO: find a branch from ls-remote? + args = append(args, "--depth=1", "--no-tags") + } else { + if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil { + args = append(args, "--unshallow") + } + } + args = append(args, "origin") + if !isCommitSHA(ref) { + args = append(args, ref+":tags/"+ref) + // local refs are needed so they would be advertised on next fetches + // TODO: is there a better way to do this? 
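+// NOTE (illustrative comment, not in the upstream file): the refspec built
+// above has the form "<ref>:tags/<ref>", e.g. "v1.0:tags/v1.0", so the fetched
+// commit is pinned under a local tag ref and advertised on later fetches.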
+ } + if _, err := gitWithinDir(ctx, gitDir, "", args...); err != nil { + return nil, errors.Wrapf(err, "failed to fetch remote %s", gs.src.Remote) + } + } + + checkoutRef, err := gs.cache.New(ctx, nil, cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) + if err != nil { + return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote) + } + + defer func() { + if retErr != nil && checkoutRef != nil { + checkoutRef.Release(context.TODO()) + } + }() + + mount, err := checkoutRef.Mount(ctx, false) + if err != nil { + return nil, err + } + lm := snapshot.LocalMounter(mount) + checkoutDir, err := lm.Mount() + if err != nil { + return nil, err + } + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + if gs.src.KeepGitDir { + _, err = gitWithinDir(ctx, checkoutDir, "", "init") + if err != nil { + return nil, err + } + _, err = gitWithinDir(ctx, checkoutDir, "", "remote", "add", "origin", gitDir) + if err != nil { + return nil, err + } + pullref := ref + if isCommitSHA(ref) { + pullref = "refs/buildkit/" + identity.NewID() + _, err = gitWithinDir(ctx, gitDir, "", "update-ref", pullref, ref) + if err != nil { + return nil, err + } + } + _, err = gitWithinDir(ctx, checkoutDir, "", "fetch", "--depth=1", "origin", pullref) + if err != nil { + return nil, err + } + _, err = gitWithinDir(ctx, checkoutDir, checkoutDir, "checkout", "FETCH_HEAD") + if err != nil { + return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) + } + gitDir = checkoutDir + } else { + _, err = gitWithinDir(ctx, gitDir, checkoutDir, "checkout", ref, "--", ".") + if err != nil { + return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) + } + } + + _, err = gitWithinDir(ctx, gitDir, checkoutDir, "submodule", "update", "--init", "--recursive", "--depth=1") + if err != nil { + return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote) + } + + lm.Unmount() + lm = nil + + snap, err := checkoutRef.Commit(ctx) + if err != nil { + return nil, err + } + checkoutRef = nil + + defer func() { + if retErr != nil { + snap.Release(context.TODO()) + } + }() + + si, _ := gs.md.Get(snap.ID()) + v, err := metadata.NewValue(snapshotKey) + v.Index = snapshotKey + if err != nil { + return nil, err + } + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, "git-snapshot", v) + }); err != nil { + return nil, err + } + + return snap, nil +} + +func isCommitSHA(str string) bool { + return validHex.MatchString(str) +} + +func gitWithinDir(ctx context.Context, gitDir, workDir string, args ...string) (*bytes.Buffer, error) { + a := []string{"--git-dir", gitDir} + if workDir != "" { + a = append(a, "--work-tree", workDir) + } + return git(ctx, workDir, append(a, args...)...) +} + +func git(ctx context.Context, dir string, args ...string) (*bytes.Buffer, error) { + for { + stdout, stderr := logs.NewLogStreams(ctx, false) + defer stdout.Close() + defer stderr.Close() + cmd := exec.Command("git", args...) 
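+// NOTE (illustrative comment, not in the upstream file): stdout and stderr are
+// teed below into both the build progress streams and local buffers; buf gives
+// callers such as ls-remote their output, while errbuf lets the retry logic
+// detect shallow-clone failures and drop the --depth argument.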
+ cmd.Dir = dir // some commands like submodule require this + buf := bytes.NewBuffer(nil) + errbuf := bytes.NewBuffer(nil) + cmd.Stdout = io.MultiWriter(stdout, buf) + cmd.Stderr = io.MultiWriter(stderr, errbuf) + // remote git commands spawn helper processes that inherit FDs and don't + // handle parent death signal so exec.CommandContext can't be used + err := runProcessGroup(ctx, cmd) + if err != nil { + if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") { + if newArgs := argsNoDepth(args); len(args) > len(newArgs) { + args = newArgs + continue + } + } + } + return buf, err + } +} + +func argsNoDepth(args []string) []string { + out := make([]string, 0, len(args)) + for _, a := range args { + if a != "--depth=1" { + out = append(out, a) + } + } + return out +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go new file mode 100644 index 0000000000..4d0e9d89d2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package git + +import ( + "context" + "os/exec" + "syscall" +) + +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + if err := cmd.Start(); err != nil { + return err + } + waitDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) + case <-waitDone: + } + }() + err := cmd.Wait() + close(waitDone) + return err +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go new file mode 100644 index 0000000000..3435c8f9ee --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package git + +import ( + "context" + "os/exec" +) + +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + waitDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cmd.Process.Kill() + case <-waitDone: + } + }() + return cmd.Wait() +} diff --git a/vendor/github.com/moby/buildkit/source/gitidentifier.go b/vendor/github.com/moby/buildkit/source/gitidentifier.go new file mode 100644 index 0000000000..9f338343bf --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/gitidentifier.go @@ -0,0 +1,70 @@ +package source + +import ( + "net/url" + "strings" + + "github.com/pkg/errors" +) + +type GitIdentifier struct { + Remote string + Ref string + Subdir string + KeepGitDir bool +} + +func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) { + repo := GitIdentifier{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. 
is not a URL, so it cannot be parsed as one + parts := strings.SplitN(remoteURL, "#", 2) + + repo.Remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.Ref, repo.Subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return nil, err + } + + repo.Ref, repo.Subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.Remote = u.String() + } + if repo.Subdir != "" { + return nil, errors.Errorf("subdir not supported yet") + } + return &repo, nil +} + +func (i *GitIdentifier) ID() string { + return "git" +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func isGitTransport(str string) bool { + return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go new file mode 100644 index 0000000000..58c7040098 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go @@ -0,0 +1,429 @@ +package http + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type Opt struct { + CacheAccessor cache.Accessor + MetadataStore *metadata.Store + Transport http.RoundTripper +} + +type httpSource struct { + md *metadata.Store + cache cache.Accessor + locker *locker.Locker + client *http.Client +} + +func NewSource(opt Opt) (source.Source, error) { + transport := opt.Transport + if transport == nil { + transport = tracing.DefaultTransport + } + hs := &httpSource{ + md: opt.MetadataStore, + cache: opt.CacheAccessor, + locker: locker.New(), + client: &http.Client{ + Transport: transport, + }, + } + return hs, nil +} + +func (hs *httpSource) ID() string { + return source.HttpsScheme +} + +type httpSourceHandler struct { + *httpSource + src source.HttpIdentifier + refID string + cacheKey digest.Digest +} + +func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { + httpIdentifier, ok := id.(*source.HttpIdentifier) + if !ok { + return nil, errors.Errorf("invalid http identifier %v", id) + } + + return &httpSourceHandler{ + src: *httpIdentifier, + httpSource: hs, + }, nil +} + +// urlHash is the internal hash that the etag is stored under; it doesn't leak outside +// this package.
+func (hs *httpSourceHandler) urlHash() (digest.Digest, error) { + dt, err := json.Marshal(struct { + Filename string + Perm, UID, GID int + }{ + Filename: getFileName(hs.src.URL, hs.src.Filename, nil), + Perm: hs.src.Perm, + UID: hs.src.UID, + GID: hs.src.GID, + }) + if err != nil { + return "", err + } + return digest.FromBytes(dt), nil +} + +func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, lastModTime string) digest.Digest { + dt, err := json.Marshal(struct { + Filename string + Perm, UID, GID int + Checksum digest.Digest + LastModTime string `json:",omitempty"` + }{ + Filename: filename, + Perm: hs.src.Perm, + UID: hs.src.UID, + GID: hs.src.GID, + Checksum: dgst, + LastModTime: lastModTime, + }) + if err != nil { + return dgst + } + return digest.FromBytes(dt) +} + +func (hs *httpSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { + if hs.src.Checksum != "" { + hs.cacheKey = hs.src.Checksum + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), true, nil + } + + uh, err := hs.urlHash() + if err != nil { + return "", false, nil + } + + // look up metadata(previously stored headers) for that URL + sis, err := hs.md.Search(uh.String()) + if err != nil { + return "", false, errors.Wrapf(err, "failed to search metadata for %s", uh) + } + + req, err := http.NewRequest("GET", hs.src.URL, nil) + if err != nil { + return "", false, err + } + req = req.WithContext(ctx) + m := map[string]*metadata.StorageItem{} + + if len(sis) > 0 { + for _, si := range sis { + // if metaDigest := getMetaDigest(si); metaDigest == hs.formatCacheKey("") { + if etag := getETag(si); etag != "" { + if dgst := getChecksum(si); dgst != "" { + m[etag] = si + req.Header.Add("If-None-Match", etag) + } + } + // } + } + } + + resp, err := hs.client.Do(req) + if err != nil { + return "", false, err + } + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return "", false, errors.Errorf("invalid response status %d", resp.StatusCode) + } + if resp.StatusCode == http.StatusNotModified { + respETag := resp.Header.Get("ETag") + si, ok := m[respETag] + if !ok { + return "", false, errors.Errorf("invalid not-modified ETag: %v", respETag) + } + hs.refID = si.ID() + dgst := getChecksum(si) + if dgst == "" { + return "", false, errors.Errorf("invalid metadata change") + } + modTime := getModTime(si) + resp.Body.Close() + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil + } + + ref, dgst, err := hs.save(ctx, resp) + if err != nil { + return "", false, err + } + ref.Release(context.TODO()) + + hs.cacheKey = dgst + + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), true, nil +} + +func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { + newRef, err := hs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL))) + if err != nil { + return nil, "", err + } + + releaseRef := func() { + newRef.Release(context.TODO()) + } + + defer func() { + if retErr != nil && newRef != nil { + releaseRef() + } + }() + + mount, err := newRef.Mount(ctx, false) + if err != nil { + return nil, "", err + } + + lm := snapshot.LocalMounter(mount) + dir, err := lm.Mount() + if err != nil { + return nil, "", err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + 
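+// NOTE (illustrative comment, not in the upstream file): the response body is
+// streamed below through an io.MultiWriter into both the snapshot file and a
+// sha256 hash, so the resulting digest doubles as the checksum stored next to
+// the ETag metadata.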
perm := 0600 + if hs.src.Perm != 0 { + perm = hs.src.Perm + } + fp := filepath.Join(dir, getFileName(hs.src.URL, hs.src.Filename, resp)) + + f, err := os.OpenFile(fp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm)) + if err != nil { + return nil, "", err + } + defer func() { + if f != nil { + f.Close() + } + }() + + h := sha256.New() + + if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil { + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + f = nil + + if hs.src.UID != 0 || hs.src.GID != 0 { + if err := os.Chown(fp, hs.src.UID, hs.src.GID); err != nil { + return nil, "", err + } + } + + mTime := time.Unix(0, 0) + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + if err := os.Chtimes(fp, mTime, mTime); err != nil { + return nil, "", err + } + + lm.Unmount() + lm = nil + + ref, err = newRef.Commit(ctx) + if err != nil { + return nil, "", err + } + newRef = nil + + hs.refID = ref.ID() + dgst = digest.NewDigest(digest.SHA256, h) + + if respETag := resp.Header.Get("ETag"); respETag != "" { + setETag(ref.Metadata(), respETag) + uh, err := hs.urlHash() + if err != nil { + return nil, "", err + } + setChecksum(ref.Metadata(), uh.String(), dgst) + if err := ref.Metadata().Commit(); err != nil { + return nil, "", err + } + } + + if modTime := resp.Header.Get("Last-Modified"); modTime != "" { + setModTime(ref.Metadata(), modTime) + } + + return ref, dgst, nil +} + +func (hs *httpSourceHandler) Snapshot(ctx context.Context) (cache.ImmutableRef, error) { + if hs.refID != "" { + ref, err := hs.cache.Get(ctx, hs.refID) + if err == nil { + return ref, nil + } + } + + req, err := http.NewRequest("GET", hs.src.URL, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := hs.client.Do(req) + if err != nil { + return nil, err + } + + ref, dgst, err := hs.save(ctx, resp) + if err != nil { + return nil, err + } + if dgst != hs.cacheKey { + ref.Release(context.TODO()) + return nil, errors.Errorf("digest mismatch %s: %s", dgst, hs.cacheKey) + } + + return ref, nil +} + +const keyETag = "etag" +const keyChecksum = "http.checksum" +const keyModTime = "http.modtime" + +func setETag(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create etag value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyETag, v) + }) + return nil +} + +func getETag(si *metadata.StorageItem) string { + v := si.Get(keyETag) + if v == nil { + return "" + } + var etag string + if err := v.Unmarshal(&etag); err != nil { + return "" + } + return etag +} + +func setModTime(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create modtime value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyModTime, v) + }) + return nil +} + +func getModTime(si *metadata.StorageItem) string { + v := si.Get(keyModTime) + if v == nil { + return "" + } + var modTime string + if err := v.Unmarshal(&modTime); err != nil { + return "" + } + return modTime +} + +func setChecksum(si *metadata.StorageItem, url string, d digest.Digest) error { + v, err := metadata.NewValue(d) + if err != nil { + return errors.Wrap(err, "failed to create checksum value") + } + v.Index = url + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyChecksum, v) + }) + 
return nil +} + +func getChecksum(si *metadata.StorageItem) digest.Digest { + v := si.Get(keyChecksum) + if v == nil { + return "" + } + var dgstStr string + if err := v.Unmarshal(&dgstStr); err != nil { + return "" + } + dgst, err := digest.Parse(dgstStr) + if err != nil { + return "" + } + return dgst +} + +func getFileName(urlStr, manualFilename string, resp *http.Response) string { + if manualFilename != "" { + return manualFilename + } + if resp != nil { + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + } + u, err := url.Parse(urlStr) + if err == nil { + if base := path.Base(u.Path); base != "." && base != "/" { + return base + } + } + return "download" +} diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go new file mode 100644 index 0000000000..ae6814c6f0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/identifier.go @@ -0,0 +1,194 @@ +package source + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/containerd/containerd/reference" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +var ( + errInvalid = errors.New("invalid") + errNotFound = errors.New("not found") +) + +const ( + DockerImageScheme = "docker-image" + GitScheme = "git" + LocalScheme = "local" + HttpScheme = "http" + HttpsScheme = "https" +) + +type Identifier interface { + ID() string // until sources are in process this string comparison could be avoided +} + +func FromString(s string) (Identifier, error) { + // TODO: improve this + parts := strings.SplitN(s, "://", 2) + if len(parts) != 2 { + return nil, errors.Wrapf(errInvalid, "failed to parse %s", s) + } + + switch parts[0] { + case DockerImageScheme: + return NewImageIdentifier(parts[1]) + case GitScheme: + return NewGitIdentifier(parts[1]) + case LocalScheme: + return NewLocalIdentifier(parts[1]) + case HttpsScheme: + return NewHttpIdentifier(parts[1], true) + case HttpScheme: + return NewHttpIdentifier(parts[1], false) + default: + return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) + } +} +func FromLLB(op *pb.Op_Source) (Identifier, error) { + id, err := FromString(op.Source.Identifier) + if err != nil { + return nil, err + } + if id, ok := id.(*GitIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrKeepGitDir: + if v == "true" { + id.KeepGitDir = true + } + case pb.AttrFullRemoteURL: + id.Remote = v + } + } + } + if id, ok := id.(*LocalIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrLocalSessionID: + id.SessionID = v + if p := strings.SplitN(v, ":", 2); len(p) == 2 { + id.Name = p[0] + "-" + id.Name + id.SessionID = p[1] + } + case pb.AttrIncludePatterns: + var patterns []string + if err := json.Unmarshal([]byte(v), &patterns); err != nil { + return nil, err + } + id.IncludePatterns = patterns + case pb.AttrExcludePatterns: + var patterns []string + if err := json.Unmarshal([]byte(v), &patterns); err != nil { + return nil, err + } + id.ExcludePatterns = patterns + case pb.AttrFollowPaths: + var paths []string + if err := json.Unmarshal([]byte(v), &paths); err != nil { + return nil, err + } + 
id.FollowPaths = paths + case pb.AttrSharedKeyHint: + id.SharedKeyHint = v + } + } + } + if id, ok := id.(*HttpIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrHTTPChecksum: + dgst, err := digest.Parse(v) + if err != nil { + return nil, err + } + id.Checksum = dgst + case pb.AttrHTTPFilename: + id.Filename = v + case pb.AttrHTTPPerm: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.Perm = int(i) + case pb.AttrHTTPUID: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.UID = int(i) + case pb.AttrHTTPGID: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.GID = int(i) + } + } + } + return id, nil +} + +type ImageIdentifier struct { + Reference reference.Spec +} + +func NewImageIdentifier(str string) (*ImageIdentifier, error) { + ref, err := reference.Parse(str) + if err != nil { + return nil, errors.WithStack(err) + } + + if ref.Object == "" { + return nil, errors.WithStack(reference.ErrObjectRequired) + } + return &ImageIdentifier{Reference: ref}, nil +} + +func (_ *ImageIdentifier) ID() string { + return DockerImageScheme +} + +type LocalIdentifier struct { + Name string + SessionID string + IncludePatterns []string + ExcludePatterns []string + FollowPaths []string + SharedKeyHint string +} + +func NewLocalIdentifier(str string) (*LocalIdentifier, error) { + return &LocalIdentifier{Name: str}, nil +} + +func (*LocalIdentifier) ID() string { + return LocalScheme +} + +func NewHttpIdentifier(str string, tls bool) (*HttpIdentifier, error) { + proto := "https://" + if !tls { + proto = "http://" + } + return &HttpIdentifier{TLS: tls, URL: proto + str}, nil +} + +type HttpIdentifier struct { + TLS bool + URL string + Checksum digest.Digest + Filename string + Perm int + UID int + GID int +} + +func (_ *HttpIdentifier) ID() string { + return HttpsScheme +} diff --git a/vendor/github.com/moby/buildkit/source/local/local.go b/vendor/github.com/moby/buildkit/source/local/local.go new file mode 100644 index 0000000000..1e9ab6a4a3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/local/local.go @@ -0,0 +1,249 @@ +package local + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/boltdb/bolt" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + "golang.org/x/time/rate" +) + +const keySharedKey = "local.sharedKey" + +type Opt struct { + SessionManager *session.Manager + CacheAccessor cache.Accessor + MetadataStore *metadata.Store +} + +func NewSource(opt Opt) (source.Source, error) { + ls := &localSource{ + sm: opt.SessionManager, + cm: opt.CacheAccessor, + md: opt.MetadataStore, + } + return ls, nil +} + +type localSource struct { + sm *session.Manager + cm cache.Accessor + md *metadata.Store +} + +func (ls *localSource) ID() string { + return source.LocalScheme +} + +func (ls *localSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { + localIdentifier, ok := id.(*source.LocalIdentifier) + if !ok { + return nil, errors.Errorf("invalid local identifier %v", id) + } + + return &localSourceHandler{ + 
src: *localIdentifier, + localSource: ls, + }, nil +} + +type localSourceHandler struct { + src source.LocalIdentifier + *localSource +} + +func (ls *localSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { + sessionID := ls.src.SessionID + + if sessionID == "" { + id := session.FromContext(ctx) + if id == "" { + return "", false, errors.New("could not access local files without session") + } + sessionID = id + } + dt, err := json.Marshal(struct { + SessionID string + IncludePatterns []string + ExcludePatterns []string + }{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns}) + if err != nil { + return "", false, err + } + return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), true, nil +} + +func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { + + id := session.FromContext(ctx) + if id == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := ls.sm.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) + + var mutable cache.MutableRef + sis, err := ls.md.Search(sharedKey) + if err != nil { + return nil, err + } + for _, si := range sis { + if m, err := ls.cm.GetMutable(ctx, si.ID()); err == nil { + logrus.Debugf("reusing ref for local: %s", m.ID()) + mutable = m + break + } + } + + if mutable == nil { + m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) + if err != nil { + return nil, err + } + mutable = m + logrus.Debugf("new ref for local: %s", mutable.ID()) + } + + defer func() { + if retErr != nil && mutable != nil { + go mutable.Release(context.TODO()) + } + }() + + mount, err := mutable.Mount(ctx, false) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + dest, err := lm.Mount() + if err != nil { + return nil, err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata()) + if err != nil { + return nil, err + } + + opt := filesync.FSSendRequestOpt{ + Name: ls.src.Name, + IncludePatterns: ls.src.IncludePatterns, + ExcludePatterns: ls.src.ExcludePatterns, + FollowPaths: ls.src.FollowPaths, + OverrideExcludes: false, + DestDir: dest, + CacheUpdater: &cacheUpdater{cc}, + ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), + } + + if err := filesync.FSSync(ctx, caller, opt); err != nil { + return nil, err + } + + if err := lm.Unmount(); err != nil { + return nil, err + } + lm = nil + + if err := contenthash.SetCacheContext(ctx, mutable.Metadata(), cc); err != nil { + return nil, err + } + + // skip storing snapshot by the shared key if it already exists + skipStoreSharedKey := false + si, _ := ls.md.Get(mutable.ID()) + if v := si.Get(keySharedKey); v != nil { + var str string + if err := v.Unmarshal(&str); err != nil { + return nil, err + } + skipStoreSharedKey = str == sharedKey + } + if !skipStoreSharedKey { + v, err := metadata.NewValue(sharedKey) + if err != nil { + return nil, err + } + v.Index = sharedKey + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, sharedKey, v) + }); err 
!= nil { + return nil, err + } + logrus.Debugf("saved %s as %s", mutable.ID(), sharedKey) + } + + snap, err := mutable.Commit(ctx) + if err != nil { + return nil, err + } + + mutable = nil // avoid deferred cleanup + + return snap, nil +} + +func newProgressHandler(ctx context.Context, id string) func(int, bool) { + limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + Action: "transferring", + } + pw.Write(id, st) + return func(s int, last bool) { + if last || limiter.Allow() { + st.Current = s + if last { + now := time.Now() + st.Completed = &now + } + pw.Write(id, st) + if last { + pw.Close() + } + } + } +} + +type cacheUpdater struct { + contenthash.CacheContext +} + +func (cu *cacheUpdater) MarkSupported(bool) { +} + +func (cu *cacheUpdater) ContentHasher() fsutil.ContentHasher { + return contenthash.NewFromStat +} diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go new file mode 100644 index 0000000000..e520b6c77c --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/manager.go @@ -0,0 +1,48 @@ +package source + +import ( + "context" + "sync" + + "github.com/moby/buildkit/cache" + "github.com/pkg/errors" +) + +type Source interface { + ID() string + Resolve(ctx context.Context, id Identifier) (SourceInstance, error) +} + +type SourceInstance interface { + CacheKey(ctx context.Context, index int) (string, bool, error) + Snapshot(ctx context.Context) (cache.ImmutableRef, error) +} + +type Manager struct { + mu sync.Mutex + sources map[string]Source +} + +func NewManager() (*Manager, error) { + return &Manager{ + sources: make(map[string]Source), + }, nil +} + +func (sm *Manager) Register(src Source) { + sm.mu.Lock() + sm.sources[src.ID()] = src + sm.mu.Unlock() +} + +func (sm *Manager) Resolve(ctx context.Context, id Identifier) (SourceInstance, error) { + sm.mu.Lock() + src, ok := sm.sources[id.ID()] + sm.mu.Unlock() + + if !ok { + return nil, errors.Errorf("no handler for %s", id.ID()) + } + + return src.Resolve(ctx, id) +} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go new file mode 100644 index 0000000000..7b907ad32b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -0,0 +1,55 @@ +// +build !windows + +package appdefaults + +import ( + "os" + "path/filepath" + "strings" +) + +const ( + Address = "unix:///run/buildkit/buildkitd.sock" + Root = "/var/lib/buildkit" +) + +// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock +func UserAddress() string { + // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. + xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") + if xdgRuntimeDir != "" { + dirs := strings.Split(xdgRuntimeDir, ":") + return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock") + } + return Address +} + +// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set. 
+// See https://github.com/opencontainers/runc/issues/1694
+func EnsureUserAddressDir() error {
+	xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
+	if xdgRuntimeDir != "" {
+		dirs := strings.Split(xdgRuntimeDir, ":")
+		dir := filepath.Join(dirs[0], "buildkit")
+		if err := os.MkdirAll(dir, 0700); err != nil {
+			return err
+		}
+		return os.Chmod(dir, 0700|os.ModeSticky)
+	}
+	return nil
+}
+
+// UserRoot typically returns /home/$USER/.local/share/buildkit
+func UserRoot() string {
+	// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
+	xdgDataHome := os.Getenv("XDG_DATA_HOME")
+	if xdgDataHome != "" {
+		dirs := strings.Split(xdgDataHome, ":")
+		return filepath.Join(dirs[0], "buildkit")
+	}
+	home := os.Getenv("HOME")
+	if home != "" {
+		return filepath.Join(home, ".local", "share", "buildkit")
+	}
+	return Root
+}
diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go
new file mode 100644
index 0000000000..dbc96c8095
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go
@@ -0,0 +1,18 @@
+package appdefaults
+
+const (
+	Address = "npipe:////./pipe/buildkitd"
+	Root    = ".buildstate"
+)
+
+func UserAddress() string {
+	return Address
+}
+
+func EnsureUserAddressDir() error {
+	return nil
+}
+
+func UserRoot() string {
+	return Root
+}
diff --git a/vendor/github.com/moby/buildkit/util/cond/cond.go b/vendor/github.com/moby/buildkit/util/cond/cond.go
new file mode 100644
index 0000000000..c5e07aec9e
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/cond/cond.go
@@ -0,0 +1,40 @@
+package cond
+
+import (
+	"sync"
+)
+
+// NewStatefulCond returns a stateful version of sync.Cond. This cond will
+// never block on `Wait()` if `Signal()` has been called after the `Wait()`
+// last returned, which avoids having to take a lock on `cond.Locker` just
+// for signalling.
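A minimal usage sketch, not part of the vendored file; the sleep is only there to exercise both orderings of Signal and Wait:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/moby/buildkit/util/cond"
)

func main() {
	var mu sync.Mutex
	c := cond.NewStatefulCond(&mu)

	go func() {
		time.Sleep(10 * time.Millisecond)
		// Signal does not need mu; the wakeup is remembered even if the
		// waiter is not blocked in Wait yet.
		c.Signal()
	}()

	mu.Lock()
	c.Wait() // returns immediately if Signal already happened
	mu.Unlock()

	fmt.Println("woken")
}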
+func NewStatefulCond(l sync.Locker) *StatefulCond { + sc := &StatefulCond{main: l} + sc.c = sync.NewCond(&sc.mu) + return sc +} + +type StatefulCond struct { + main sync.Locker + mu sync.Mutex + c *sync.Cond + signalled bool +} + +func (s *StatefulCond) Wait() { + s.main.Unlock() + s.mu.Lock() + if !s.signalled { + s.c.Wait() + } + s.signalled = false + s.mu.Unlock() + s.main.Lock() +} + +func (s *StatefulCond) Signal() { + s.mu.Lock() + s.signalled = true + s.c.Signal() + s.mu.Unlock() +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go new file mode 100644 index 0000000000..ac8c8baff3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go @@ -0,0 +1,156 @@ +package contentutil + +import ( + "bytes" + "context" + "io/ioutil" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Buffer is a content provider and ingester that keeps data in memory +type Buffer interface { + content.Provider + content.Ingester +} + +// NewBuffer returns a new buffer +func NewBuffer() Buffer { + return &buffer{ + buffers: map[digest.Digest][]byte{}, + refs: map[string]struct{}{}, + } +} + +type buffer struct { + mu sync.Mutex + buffers map[digest.Digest][]byte + refs map[string]struct{} +} + +func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + b.mu.Lock() + if _, ok := b.refs[wOpts.Ref]; ok { + return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", wOpts.Ref) + } + b.mu.Unlock() + return &bufferedWriter{ + main: b, + digester: digest.Canonical.Digester(), + buffer: bytes.NewBuffer(nil), + expected: wOpts.Desc.Digest, + releaseRef: func() { + b.mu.Lock() + delete(b.refs, wOpts.Ref) + b.mu.Unlock() + }, + }, nil +} + +func (b *buffer) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + r, err := b.getBytesReader(ctx, desc.Digest) + if err != nil { + return nil, err + } + return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil +} + +func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if dt, ok := b.buffers[dgst]; ok { + return bytes.NewReader(dt), nil + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) +} + +func (b *buffer) addValue(k digest.Digest, dt []byte) { + b.mu.Lock() + defer b.mu.Unlock() + b.buffers[k] = dt +} + +type bufferedWriter struct { + main *buffer + ref string + offset int64 + total int64 + startedAt time.Time + updatedAt time.Time + buffer *bytes.Buffer + expected digest.Digest + digester digest.Digester + releaseRef func() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + n, err = w.buffer.Write(p) + w.digester.Hash().Write(p[:n]) + w.offset += int64(len(p)) + w.updatedAt = time.Now() + return n, err +} + +func (w *bufferedWriter) Close() error { + if w.buffer != nil { + w.releaseRef() + w.buffer = nil + } + return nil +} + +func (w *bufferedWriter) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + 
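For orientation, a small sketch of how this buffer is typically driven end to end: ingest a blob under a ref, then read it back by descriptor. The ref name and payload are made up for the example:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/util/contentutil"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()
	buf := contentutil.NewBuffer()

	dt := []byte("example blob")
	dgst := digest.FromBytes(dt)

	// Ingest: write the blob and commit it against its expected digest.
	w, err := buf.Writer(ctx, content.WithRef("example"))
	if err != nil {
		panic(err)
	}
	if _, err := w.Write(dt); err != nil {
		panic(err)
	}
	if err := w.Commit(ctx, int64(len(dt)), dgst); err != nil {
		panic(err)
	}

	// Provide: read the blob back by descriptor.
	ra, err := buf.ReaderAt(ctx, ocispec.Descriptor{Digest: dgst, Size: int64(len(dt))})
	if err != nil {
		panic(err)
	}
	p := make([]byte, ra.Size())
	if _, err := ra.ReadAt(p, 0); err != nil {
		panic(err)
	}
	fmt.Println(string(p)) // "example blob"
}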
+func (w *bufferedWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +func (w *bufferedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opt ...content.Opt) error { + if w.buffer == nil { + return errors.Errorf("can't commit already committed or closed") + } + if s := int64(w.buffer.Len()); size > 0 && size != s { + return errors.Errorf("unexpected commit size %d, expected %d", s, size) + } + dgst := w.digester.Digest() + if expected != "" && expected != dgst { + return errors.Errorf("unexpected digest: %v != %v", dgst, expected) + } + if w.expected != "" && w.expected != dgst { + return errors.Errorf("unexpected digest: %v != %v", dgst, w.expected) + } + w.main.addValue(dgst, w.buffer.Bytes()) + return w.Close() +} + +func (w *bufferedWriter) Truncate(size int64) error { + if size != 0 { + return errors.New("Truncate: unsupported size") + } + w.offset = 0 + w.digester.Hash().Reset() + w.buffer.Reset() + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go new file mode 100644 index 0000000000..a03f16e65e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -0,0 +1,43 @@ +package contentutil + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { + if _, err := remotes.FetchHandler(ingester, &localFetcher{provider})(ctx, desc); err != nil { + return err + } + return nil +} + +type localFetcher struct { + content.Provider +} + +func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + r, err := f.Provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + return &rc{ReaderAt: r}, nil +} + +type rc struct { + content.ReaderAt + offset int +} + +func (r *rc) Read(b []byte) (int, error) { + n, err := r.ReadAt(b, int64(r.offset)) + r.offset += n + if n > 0 && err == io.EOF { + err = nil + } + return n, err +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go new file mode 100644 index 0000000000..645b619603 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go @@ -0,0 +1,77 @@ +package contentutil + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func FromFetcher(f remotes.Fetcher, desc ocispec.Descriptor) content.Provider { + return &fetchedProvider{ + f: f, + desc: desc, + } +} + +type fetchedProvider struct { + f remotes.Fetcher + desc ocispec.Descriptor +} + +func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + if desc.Digest != p.desc.Digest { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest) + } + + rc, err := p.f.Fetch(ctx, p.desc) + if err != nil { + return nil, err + } + + return &readerAt{Reader: rc, Closer: rc, size: p.desc.Size}, nil +} + +type readerAt struct { + io.Reader + io.Closer + size int64 + offset int64 +} + +func (r *readerAt) ReadAt(b []byte, off int64) (int, error) { + if ra, ok := r.Reader.(io.ReaderAt); ok { + return 
ra.ReadAt(b, off) + } + + if r.offset != off { + if seeker, ok := r.Reader.(io.Seeker); ok { + if _, err := seeker.Seek(off, io.SeekStart); err != nil { + return 0, err + } + r.offset = off + } else { + return 0, errors.Errorf("unsupported offset") + } + } + + var totalN int + for len(b) > 0 { + n, err := r.Reader.Read(b) + r.offset += int64(n) + totalN += n + b = b[n:] + if err != nil { + return totalN, err + } + } + return totalN, nil +} + +func (r *readerAt) Size() int64 { + return r.size +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go new file mode 100644 index 0000000000..fd635ad456 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go @@ -0,0 +1,44 @@ +package contentutil + +import ( + "context" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func NewMultiProvider(base content.Provider) *MultiProvider { + return &MultiProvider{ + base: base, + sub: map[digest.Digest]content.Provider{}, + } +} + +type MultiProvider struct { + mu sync.RWMutex + base content.Provider + sub map[digest.Digest]content.Provider +} + +func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + return p.ReaderAt(ctx, desc) + } + mp.mu.RUnlock() + if mp.base == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest) + } + return mp.base.ReaderAt(ctx, desc) +} + +func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) { + mp.mu.Lock() + defer mp.mu.Unlock() + mp.sub[dgst] = p +} diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go new file mode 100644 index 0000000000..e51824a4f3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go @@ -0,0 +1,324 @@ +package flightcontrol + +import ( + "context" + "io" + "runtime" + "sort" + "sync" + "time" + + "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" +) + +// flightcontrol is like singleflight but with support for cancellation and +// nested progress reporting + +var errRetry = errors.Errorf("retry") + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/util/flightcontrol.progress") + +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) { + defer func() { + if errors.Cause(err) == errRetry { + runtime.Gosched() + v, err = g.Do(ctx, key, fn) + } + }() + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + + if c, ok := g.m[key]; ok { // register 2nd waiter + g.mu.Unlock() + return c.wait(ctx) + } + + c := newCall(fn) + g.m[key] = c + go func() { + // cleanup after a caller has returned + <-c.ready + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() + }() + g.mu.Unlock() + return c.wait(ctx) +} + +type call struct { + mu sync.Mutex + result interface{} + err error + ready chan struct{} + + ctx *sharedContext + ctxs []context.Context + fn func(ctx context.Context) (interface{}, error) + once sync.Once + + closeProgressWriter func() + 
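+	// progressState buffers and replays progress so that callers that
+	// attach via wait() after work has started still see earlier items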
progressState *progressState + progressCtx context.Context +} + +func newCall(fn func(ctx context.Context) (interface{}, error)) *call { + c := &call{ + fn: fn, + ready: make(chan struct{}), + progressState: newProgressState(), + } + ctx := newContext(c) // newSharedContext + pr, pctx, closeProgressWriter := progress.NewContext(context.Background()) + + c.progressCtx = pctx + c.ctx = ctx + c.closeProgressWriter = closeProgressWriter + + go c.progressState.run(pr) // TODO: remove this, wrap writer instead + + return c +} + +func (c *call) run() { + defer c.closeProgressWriter() + v, err := c.fn(c.ctx) + c.mu.Lock() + c.result = v + c.err = err + c.mu.Unlock() + close(c.ready) +} + +func (c *call) wait(ctx context.Context) (v interface{}, err error) { + c.mu.Lock() + // detect case where caller has just returned, let it clean up before + select { + case <-c.ready: // could return if no error + c.mu.Unlock() + return nil, errRetry + default: + } + + pw, ok, ctx := progress.FromContext(ctx) + if ok { + c.progressState.add(pw) + } + c.ctxs = append(c.ctxs, ctx) + + c.mu.Unlock() + + go c.once.Do(c.run) + + select { + case <-ctx.Done(): + select { + case <-c.ctx.Done(): + // if this cancelled the last context, then wait for function to shut down + // and don't accept any more callers + <-c.ready + return c.result, c.err + default: + if ok { + c.progressState.close(pw) + } + return nil, ctx.Err() + } + case <-c.ready: + return c.result, c.err // shared not implemented yet + } +} + +func (c *call) Deadline() (deadline time.Time, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + for _, ctx := range c.ctxs { + select { + case <-ctx.Done(): + default: + dl, ok := ctx.Deadline() + if ok { + return dl, ok + } + } + } + return time.Time{}, false +} + +func (c *call) Done() <-chan struct{} { + c.mu.Lock() + c.ctx.signal() + c.mu.Unlock() + return c.ctx.done +} + +func (c *call) Err() error { + select { + case <-c.ctx.Done(): + return c.ctx.err + default: + return nil + } +} + +func (c *call) Value(key interface{}) interface{} { + if key == contextKey { + return c.progressState + } + c.mu.Lock() + defer c.mu.Unlock() + + ctx := c.progressCtx + select { + case <-ctx.Done(): + default: + if v := ctx.Value(key); v != nil { + return v + } + } + + if len(c.ctxs) > 0 { + ctx = c.ctxs[0] + select { + case <-ctx.Done(): + default: + if v := ctx.Value(key); v != nil { + return v + } + } + } + + return nil +} + +type sharedContext struct { + *call + done chan struct{} + err error +} + +func newContext(c *call) *sharedContext { + return &sharedContext{call: c, done: make(chan struct{})} +} + +// call with lock +func (c *sharedContext) signal() { + select { + case <-c.done: + default: + var err error + for _, ctx := range c.ctxs { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + return + } + } + c.err = err + close(c.done) + } +} + +type rawProgressWriter interface { + WriteRawProgress(*progress.Progress) error + Close() error +} + +type progressState struct { + mu sync.Mutex + items map[string]*progress.Progress + writers []rawProgressWriter + done bool +} + +func newProgressState() *progressState { + return &progressState{ + items: make(map[string]*progress.Progress), + } +} + +func (ps *progressState) run(pr progress.Reader) { + for { + p, err := pr.Read(context.TODO()) + if err != nil { + if err == io.EOF { + ps.mu.Lock() + ps.done = true + ps.mu.Unlock() + for _, w := range ps.writers { + w.Close() + } + } + return + } + ps.mu.Lock() + for _, p := range p { + for _, w := range ps.writers { + 
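+				// fan the raw item out to every attached writer; ps.items
+				// below keeps only the latest item per ID for later replay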
w.WriteRawProgress(p) + } + ps.items[p.ID] = p + } + ps.mu.Unlock() + } +} + +func (ps *progressState) add(pw progress.Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + plist := make([]*progress.Progress, 0, len(ps.items)) + for _, p := range ps.items { + plist = append(plist, p) + } + sort.Slice(plist, func(i, j int) bool { + return plist[i].Timestamp.Before(plist[j].Timestamp) + }) + for _, p := range plist { + rw.WriteRawProgress(p) + } + if ps.done { + rw.Close() + } else { + ps.writers = append(ps.writers, rw) + } + ps.mu.Unlock() +} + +func (ps *progressState) close(pw progress.Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + for i, w := range ps.writers { + if w == rw { + w.Close() + ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) + break + } + } + ps.mu.Unlock() +} + +func WriteProgress(ctx context.Context, pw progress.Writer) error { + v := ctx.Value(contextKey) + p, ok := v.(*progressState) + if !ok { + return errors.Errorf("invalid context not from flightcontrol") + } + p.add(pw) + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go new file mode 100644 index 0000000000..c9d5e391ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -0,0 +1,153 @@ +package imageutil + +import ( + "context" + "encoding/json" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type IngesterProvider interface { + content.Ingester + content.Provider +} + +func Config(ctx context.Context, str string, resolver remotes.Resolver, ingester IngesterProvider, platform string) (digest.Digest, []byte, error) { + if platform == "" { + platform = platforms.Default() + } + ref, err := reference.Parse(str) + if err != nil { + return "", nil, errors.WithStack(err) + } + + desc := ocispec.Descriptor{ + Digest: ref.Digest(), + } + if desc.Digest != "" { + ra, err := ingester.ReaderAt(ctx, desc) + if err == nil { + desc.Size = ra.Size() + mt, err := DetectManifestMediaType(ra) + if err == nil { + desc.MediaType = mt + } + } + } + // use resolver if desc is incomplete + if desc.MediaType == "" { + _, desc, err = resolver.Resolve(ctx, ref.String()) + if err != nil { + return "", nil, err + } + } + + fetcher, err := resolver.Fetcher(ctx, ref.String()) + if err != nil { + return "", nil, err + } + + handlers := []images.Handler{ + remotes.FetchHandler(ingester, fetcher), + childrenConfigHandler(ingester, platform), + } + if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil { + return "", nil, err + } + config, err := images.Config(ctx, ingester, desc, platform) + if err != nil { + return "", nil, err + } + + dt, err := content.ReadBlob(ctx, ingester, config) + if err != nil { + return "", nil, err + } + + return desc.Digest, dt, nil +} + +func childrenConfigHandler(provider content.Provider, platform string) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var descs []ocispec.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := 
content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + if platform != "" { + pf, err := platforms.Parse(platform) + if err != nil { + return nil, err + } + matcher := platforms.NewMatcher(pf) + + for _, d := range index.Manifests { + if d.Platform == nil || matcher.Match(*d.Platform) { + descs = append(descs, d) + } + } + } else { + descs = append(descs, index.Manifests...) + } + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + // childless data types. + return nil, nil + default: + return nil, errors.Errorf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil + } +} + +// ocispec.MediaTypeImageManifest, // TODO: detect schema1/manifest-list +func DetectManifestMediaType(ra content.ReaderAt) (string, error) { + // TODO: schema1 + + p := make([]byte, ra.Size()) + if _, err := ra.ReadAt(p, 0); err != nil { + return "", err + } + + var mfst struct { + Config json.RawMessage `json:"config"` + } + + if err := json.Unmarshal(p, &mfst); err != nil { + return "", err + } + + if mfst.Config != nil { + return images.MediaTypeDockerSchema2Manifest, nil + } + return images.MediaTypeDockerSchema2ManifestList, nil +} diff --git a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md new file mode 100644 index 0000000000..7b985bafbf --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md @@ -0,0 +1 @@ +Temporary forked from https://github.com/opencontainers/runc/pull/1692 diff --git a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go new file mode 100644 index 0000000000..6ca1e06d76 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go @@ -0,0 +1,190 @@ +package specconv + +import ( + "os" + "sort" + "strings" + + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// RootlessOpts is an optional spec for ToRootless +type RootlessOpts struct { + // Add sub{u,g}id to spec.Linux.{U,G}IDMappings. + // Requires newuidmap(1) and newgidmap(1) with suid bit. + // Ignored when running in userns. + MapSubUIDGID bool +} + +// Run-time context for ToRootless. +type RootlessContext struct { + EUID uint32 + EGID uint32 + SubUIDs []user.SubID + SubGIDs []user.SubID + UIDMap []user.IDMap + GIDMap []user.IDMap + InUserNS bool +} + +// ToRootless converts the given spec file into one that should work with +// rootless containers, by removing incompatible options and adding others that +// are needed. 
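A hypothetical invocation, assuming a deliberately minimal runtime spec (a real spec would also carry process, root and mount configuration):

package main

import (
	"log"

	specconv "github.com/moby/buildkit/util/libcontainer_specconv"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// A minimal stand-in for a real runtime spec.
	spec := &specs.Spec{
		Linux: &specs.Linux{
			Namespaces: []specs.LinuxNamespace{
				{Type: specs.PIDNamespace},
				{Type: specs.NetworkNamespace}, // will be dropped
			},
		},
	}
	if err := specconv.ToRootless(spec, &specconv.RootlessOpts{MapSubUIDGID: true}); err != nil {
		log.Fatal(err)
	}
	// spec now has a user namespace, ID mappings for the current user
	// (plus sub{u,g}ids when available), and no network namespace.
}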
+func ToRootless(spec *specs.Spec, opts *RootlessOpts) error { + var err error + ctx := RootlessContext{} + ctx.EUID = uint32(os.Geteuid()) + ctx.EGID = uint32(os.Getegid()) + ctx.SubUIDs, err = user.CurrentUserSubUIDs() + if err != nil && !os.IsNotExist(err) { + return err + } + ctx.SubGIDs, err = user.CurrentGroupSubGIDs() + if err != nil && !os.IsNotExist(err) { + return err + } + ctx.UIDMap, err = user.CurrentProcessUIDMap() + if err != nil && !os.IsNotExist(err) { + return err + } + uidMapExists := !os.IsNotExist(err) + ctx.GIDMap, err = user.CurrentProcessUIDMap() + if err != nil && !os.IsNotExist(err) { + return err + } + ctx.InUserNS = uidMapExists && system.UIDMapInUserNS(ctx.UIDMap) + return ToRootlessWithContext(ctx, spec, opts) +} + +// ToRootlessWithContext converts the spec with the run-time context. +// ctx can be internally modified for sorting. +func ToRootlessWithContext(ctx RootlessContext, spec *specs.Spec, opts *RootlessOpts) error { + if opts == nil { + opts = &RootlessOpts{} + } + var namespaces []specs.LinuxNamespace + + // Remove networkns from the spec. + for _, ns := range spec.Linux.Namespaces { + switch ns.Type { + case specs.NetworkNamespace, specs.UserNamespace: + // Do nothing. + default: + namespaces = append(namespaces, ns) + } + } + // Add userns to the spec. + namespaces = append(namespaces, specs.LinuxNamespace{ + Type: specs.UserNamespace, + }) + spec.Linux.Namespaces = namespaces + + // Add mappings for the current user. + if ctx.InUserNS { + uNextContainerID := 0 + sort.Sort(idmapSorter(ctx.UIDMap)) + for _, uidmap := range ctx.UIDMap { + spec.Linux.UIDMappings = append(spec.Linux.UIDMappings, + specs.LinuxIDMapping{ + HostID: uint32(uidmap.ID), + ContainerID: uint32(uNextContainerID), + Size: uint32(uidmap.Count), + }) + uNextContainerID += uidmap.Count + } + gNextContainerID := 0 + sort.Sort(idmapSorter(ctx.GIDMap)) + for _, gidmap := range ctx.GIDMap { + spec.Linux.GIDMappings = append(spec.Linux.GIDMappings, + specs.LinuxIDMapping{ + HostID: uint32(gidmap.ID), + ContainerID: uint32(gNextContainerID), + Size: uint32(gidmap.Count), + }) + gNextContainerID += gidmap.Count + } + // opts.MapSubUIDGID is ignored in userns + } else { + spec.Linux.UIDMappings = []specs.LinuxIDMapping{{ + HostID: ctx.EUID, + ContainerID: 0, + Size: 1, + }} + spec.Linux.GIDMappings = []specs.LinuxIDMapping{{ + HostID: ctx.EGID, + ContainerID: 0, + Size: 1, + }} + if opts.MapSubUIDGID { + uNextContainerID := 1 + sort.Sort(subIDSorter(ctx.SubUIDs)) + for _, subuid := range ctx.SubUIDs { + spec.Linux.UIDMappings = append(spec.Linux.UIDMappings, + specs.LinuxIDMapping{ + HostID: uint32(subuid.SubID), + ContainerID: uint32(uNextContainerID), + Size: uint32(subuid.Count), + }) + uNextContainerID += subuid.Count + } + gNextContainerID := 1 + sort.Sort(subIDSorter(ctx.SubGIDs)) + for _, subgid := range ctx.SubGIDs { + spec.Linux.GIDMappings = append(spec.Linux.GIDMappings, + specs.LinuxIDMapping{ + HostID: uint32(subgid.SubID), + ContainerID: uint32(gNextContainerID), + Size: uint32(subgid.Count), + }) + gNextContainerID += subgid.Count + } + } + } + + // Fix up mounts. + var mounts []specs.Mount + for _, mount := range spec.Mounts { + // Ignore all mounts that are under /sys. + if strings.HasPrefix(mount.Destination, "/sys") { + continue + } + + // Remove all gid= and uid= mappings. 
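+		// (such options reference host IDs that are generally not mapped
+		// inside the new user namespace, so the mount would be rejected)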
+ var options []string + for _, option := range mount.Options { + if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") { + options = append(options, option) + } + } + + mount.Options = options + mounts = append(mounts, mount) + } + // Add the sysfs mount as an rbind. + mounts = append(mounts, specs.Mount{ + Source: "/sys", + Destination: "/sys", + Type: "none", + Options: []string{"rbind", "nosuid", "noexec", "nodev", "ro"}, + }) + spec.Mounts = mounts + + // Remove cgroup settings. + spec.Linux.Resources = nil + return nil +} + +// subIDSorter is required for Go <= 1.7 +type subIDSorter []user.SubID + +func (x subIDSorter) Len() int { return len(x) } +func (x subIDSorter) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x subIDSorter) Less(i, j int) bool { return x[i].SubID < x[j].SubID } + +type idmapSorter []user.IDMap + +func (x idmapSorter) Len() int { return len(x) } +func (x idmapSorter) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x idmapSorter) Less(i, j int) bool { return x[i].ID < x[j].ID } diff --git a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go new file mode 100644 index 0000000000..54f6ff8965 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go @@ -0,0 +1,53 @@ +package logs + +import ( + "context" + "io" + "os" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" +) + +func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) { + return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput) +} + +func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser { + pw, _, _ := progress.FromContext(ctx) + return &streamWriter{ + pw: pw, + stream: stream, + printOutput: printOutput, + } +} + +type streamWriter struct { + pw progress.Writer + stream int + printOutput bool +} + +func (sw *streamWriter) Write(dt []byte) (int, error) { + sw.pw.Write(identity.NewID(), client.VertexLog{ + Stream: sw.stream, + Data: append([]byte{}, dt...), + }) + if sw.printOutput { + switch sw.stream { + case 1: + return os.Stdout.Write(dt) + case 2: + return os.Stderr.Write(dt) + default: + return 0, errors.Errorf("invalid stream %d", sw.stream) + } + } + return len(dt), nil +} + +func (sw *streamWriter) Close() error { + return sw.pw.Close() +} diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go new file mode 100644 index 0000000000..2bd3f2ca86 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/multireader.go @@ -0,0 +1,77 @@ +package progress + +import ( + "context" + "io" + "sync" +) + +type MultiReader struct { + mu sync.Mutex + main Reader + initialized bool + done chan struct{} + writers map[*progressWriter]func() +} + +func NewMultiReader(pr Reader) *MultiReader { + mr := &MultiReader{ + main: pr, + writers: make(map[*progressWriter]func()), + done: make(chan struct{}), + } + return mr +} + +func (mr *MultiReader) Reader(ctx context.Context) Reader { + mr.mu.Lock() + defer mr.mu.Unlock() + + pr, ctx, closeWriter := NewContext(ctx) + pw, _, ctx := FromContext(ctx) + + w := pw.(*progressWriter) + mr.writers[w] = closeWriter + + go func() { + select { + case <-ctx.Done(): + case <-mr.done: + } + mr.mu.Lock() + defer mr.mu.Unlock() + delete(mr.writers, w) + }() + + if !mr.initialized { + go mr.handle() + 
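+		// a single handle goroutine drains the main reader on behalf of
+		// all sub-readers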
mr.initialized = true + } + + return pr +} + +func (mr *MultiReader) handle() error { + for { + p, err := mr.main.Read(context.TODO()) + if err != nil { + if err == io.EOF { + mr.mu.Lock() + for w, c := range mr.writers { + w.Close() + c() + } + mr.mu.Unlock() + return nil + } + return err + } + mr.mu.Lock() + for _, p := range p { + for w := range mr.writers { + w.writeRawProgress(p) + } + } + mr.mu.Unlock() + } +} diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go new file mode 100644 index 0000000000..51989368ce --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -0,0 +1,105 @@ +package progress + +import ( + "sort" + "sync" + "time" +) + +type rawProgressWriter interface { + WriteRawProgress(*Progress) error + Close() error +} + +type MultiWriter struct { + mu sync.Mutex + items []*Progress + writers map[rawProgressWriter]struct{} + done bool + meta map[string]interface{} +} + +func NewMultiWriter(opts ...WriterOption) *MultiWriter { + mw := &MultiWriter{ + writers: map[rawProgressWriter]struct{}{}, + meta: map[string]interface{}{}, + } + for _, o := range opts { + o(mw) + } + return mw +} + +func (ps *MultiWriter) Add(pw Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + plist := make([]*Progress, 0, len(ps.items)) + for _, p := range ps.items { + plist = append(plist, p) + } + sort.Slice(plist, func(i, j int) bool { + return plist[i].Timestamp.Before(plist[j].Timestamp) + }) + for _, p := range plist { + rw.WriteRawProgress(p) + } + ps.writers[rw] = struct{}{} + ps.mu.Unlock() +} + +func (ps *MultiWriter) Delete(pw Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + + ps.mu.Lock() + delete(ps.writers, rw) + ps.mu.Unlock() +} + +func (ps *MultiWriter) Write(id string, v interface{}) error { + p := &Progress{ + ID: id, + Timestamp: time.Now(), + Sys: v, + meta: ps.meta, + } + return ps.WriteRawProgress(p) +} + +func (ps *MultiWriter) WriteRawProgress(p *Progress) error { + meta := p.meta + if len(ps.meta) > 0 { + meta = map[string]interface{}{} + for k, v := range p.meta { + meta[k] = v + } + for k, v := range ps.meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + } + p.meta = meta + return ps.writeRawProgress(p) +} + +func (ps *MultiWriter) writeRawProgress(p *Progress) error { + ps.mu.Lock() + defer ps.mu.Unlock() + ps.items = append(ps.items, p) + for w := range ps.writers { + if err := w.WriteRawProgress(p); err != nil { + return err + } + } + return nil +} + +func (ps *MultiWriter) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go new file mode 100644 index 0000000000..b802716bf7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/progress.go @@ -0,0 +1,252 @@ +package progress + +import ( + "context" + "io" + "sort" + "sync" + "time" + + "github.com/pkg/errors" +) + +// Progress package provides utility functions for using the context to capture +// progress of a running function. All progress items written contain an ID +// that is used to collapse unread messages. + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/util/progress") + +// FromContext returns a progress writer from a context. 
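As a compact sketch of the writer/reader pairing described above (the ID and statuses are invented for the example):

package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/util/progress"
)

func main() {
	pr, ctx, closeWriter := progress.NewContext(context.Background())

	go func() {
		pw, _, _ := progress.FromContext(ctx)
		pw.Write("copy", progress.Status{Action: "copying", Current: 1, Total: 2})
		pw.Write("copy", progress.Status{Action: "copying", Current: 2, Total: 2})
		pw.Close()
		closeWriter() // no more writers will attach to this context
	}()

	for {
		items, err := pr.Read(context.Background())
		if err != nil { // io.EOF once every writer is closed
			return
		}
		for _, p := range items {
			fmt.Println(p.ID, p.Sys) // unread items with the same ID collapse
		}
	}
}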
+func FromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) { + v := ctx.Value(contextKey) + pw, ok := v.(*progressWriter) + if !ok { + if pw, ok := v.(*MultiWriter); ok { + return pw, true, ctx + } + return &noOpWriter{}, false, ctx + } + pw = newWriter(pw) + for _, o := range opts { + o(pw) + } + ctx = context.WithValue(ctx, contextKey, pw) + return pw, true, ctx +} + +type WriterOption func(Writer) + +// NewContext returns a new context and a progress reader that captures all +// progress items writtern to this context. Last returned parameter is a closer +// function to signal that no new writes will happen to this context. +func NewContext(ctx context.Context) (Reader, context.Context, func()) { + pr, pw, cancel := pipe() + ctx = WithProgress(ctx, pw) + return pr, ctx, cancel +} + +func WithProgress(ctx context.Context, pw Writer) context.Context { + return context.WithValue(ctx, contextKey, pw) +} + +func WithMetadata(key string, val interface{}) WriterOption { + return func(w Writer) { + if pw, ok := w.(*progressWriter); ok { + pw.meta[key] = val + } + if pw, ok := w.(*MultiWriter); ok { + pw.meta[key] = val + } + } +} + +type Writer interface { + Write(id string, value interface{}) error + Close() error +} + +type Reader interface { + Read(context.Context) ([]*Progress, error) +} + +type Progress struct { + ID string + Timestamp time.Time + Sys interface{} + meta map[string]interface{} +} + +type Status struct { + Action string + Current int + Total int + Started *time.Time + Completed *time.Time +} + +type progressReader struct { + ctx context.Context + cond *sync.Cond + mu sync.Mutex + writers map[*progressWriter]struct{} + dirty map[string]*Progress +} + +func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + case <-ctx.Done(): + pr.cond.Broadcast() + } + }() + pr.mu.Lock() + for { + select { + case <-ctx.Done(): + pr.mu.Unlock() + return nil, ctx.Err() + default: + } + dmap := pr.dirty + if len(dmap) == 0 { + select { + case <-pr.ctx.Done(): + if len(pr.writers) == 0 { + pr.mu.Unlock() + return nil, io.EOF + } + default: + } + pr.cond.Wait() + continue + } + pr.dirty = make(map[string]*Progress) + pr.mu.Unlock() + + out := make([]*Progress, 0, len(dmap)) + for _, p := range dmap { + out = append(out, p) + } + + sort.Slice(out, func(i, j int) bool { + return out[i].Timestamp.Before(out[j].Timestamp) + }) + + return out, nil + } +} + +func (pr *progressReader) append(pw *progressWriter) { + pr.mu.Lock() + defer pr.mu.Unlock() + + select { + case <-pr.ctx.Done(): + return + default: + pr.writers[pw] = struct{}{} + } +} + +func pipe() (*progressReader, *progressWriter, func()) { + ctx, cancel := context.WithCancel(context.Background()) + pr := &progressReader{ + ctx: ctx, + writers: make(map[*progressWriter]struct{}), + dirty: make(map[string]*Progress), + } + pr.cond = sync.NewCond(&pr.mu) + go func() { + <-ctx.Done() + pr.cond.Broadcast() + }() + pw := &progressWriter{ + reader: pr, + } + return pr, pw, cancel +} + +func newWriter(pw *progressWriter) *progressWriter { + meta := make(map[string]interface{}) + for k, v := range pw.meta { + meta[k] = v + } + pw = &progressWriter{ + reader: pw.reader, + meta: meta, + } + pw.reader.append(pw) + return pw +} + +type progressWriter struct { + done bool + reader *progressReader + meta map[string]interface{} +} + +func (pw *progressWriter) Write(id string, v interface{}) error { + if pw.done { + 
return errors.Errorf("writing %s to closed progress writer", id) + } + return pw.writeRawProgress(&Progress{ + ID: id, + Timestamp: time.Now(), + Sys: v, + meta: pw.meta, + }) +} + +func (pw *progressWriter) WriteRawProgress(p *Progress) error { + meta := p.meta + if len(pw.meta) > 0 { + meta = map[string]interface{}{} + for k, v := range p.meta { + meta[k] = v + } + for k, v := range pw.meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + } + p.meta = meta + return pw.writeRawProgress(p) +} + +func (pw *progressWriter) writeRawProgress(p *Progress) error { + pw.reader.mu.Lock() + pw.reader.dirty[p.ID] = p + pw.reader.cond.Broadcast() + pw.reader.mu.Unlock() + return nil +} + +func (pw *progressWriter) Close() error { + pw.reader.mu.Lock() + delete(pw.reader.writers, pw) + pw.reader.mu.Unlock() + pw.reader.cond.Broadcast() + pw.done = true + return nil +} + +func (p *Progress) Meta(key string) (interface{}, bool) { + v, ok := p.meta[key] + return v, ok +} + +type noOpWriter struct{} + +func (pw *noOpWriter) Write(_ string, _ interface{}) error { + return nil +} + +func (pw *noOpWriter) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go new file mode 100644 index 0000000000..5cc2f01d77 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/push/push.go @@ -0,0 +1,184 @@ +package push + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/auth" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) { + id := session.FromContext(ctx) + if id == "" { + return nil + } + return func(host string) (string, string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + caller, err := sm.Get(timeoutCtx, id) + if err != nil { + return "", "", err + } + + return auth.CredentialsFunc(context.TODO(), caller)(host) + } +} + +func Push(ctx context.Context, sm *session.Manager, cs content.Provider, dgst digest.Digest, ref string, insecure bool) error { + desc := ocispec.Descriptor{ + Digest: dgst, + } + parsed, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return err + } + ref = reference.TagNameOnly(parsed).String() + + resolver := docker.NewResolver(docker.ResolverOptions{ + Client: tracing.DefaultClient, + Credentials: getCredentialsFunc(ctx, sm), + PlainHTTP: insecure, + }) + + pusher, err := resolver.Pusher(ctx, ref) + if err != nil { + return err + } + + var m sync.Mutex + manifestStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + manifestStack = append(manifestStack, desc) + m.Unlock() + return nil, 
images.ErrStopHandler + default: + return nil, nil + } + }) + + pushHandler := remotes.PushHandler(pusher, cs) + + handlers := append([]images.Handler{}, + childrenHandler(cs), + filterHandler, + pushHandler, + ) + + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return err + } + + mtype, err := imageutil.DetectManifestMediaType(ra) + if err != nil { + return err + } + + layersDone := oneOffProgress(ctx, "pushing layers") + err = images.Dispatch(ctx, images.Handlers(handlers...), ocispec.Descriptor{ + Digest: dgst, + Size: ra.Size(), + MediaType: mtype, + }) + layersDone(err) + if err != nil { + return err + } + + mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref)) + for i := len(manifestStack) - 1; i >= 0; i-- { + _, err := pushHandler(ctx, manifestStack[i]) + if err != nil { + mfstDone(err) + return err + } + } + mfstDone(nil) + return nil +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func childrenHandler(provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var descs []ocispec.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + descs = append(descs, manifest.Layers...) + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + for _, m := range index.Manifests { + if m.Digest != "" { + descs = append(descs, m) + } + } + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip: + // childless data types. + return nil, nil + default: + logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil + } +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go new file mode 100644 index 0000000000..c607c4db09 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. 
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go
new file mode 100644
index 0000000000..cbfe2c1576
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:   --> Fail
+// C:\  --> \
+// a    --> a
+// /a   --> \a
+// d:\  --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go
new file mode 100644
index 0000000000..62afa03fef
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go
@@ -0,0 +1,29 @@
+// +build linux,seccomp
+
+package system
+
+import (
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+var seccompSupported bool
+var seccompOnce sync.Once
+
+// SeccompSupported reports whether the running kernel supports seccomp,
+// probing once and caching the result.
+func SeccompSupported() bool {
+	seccompOnce.Do(func() {
+		seccompSupported = getSeccompSupported()
+	})
+	return seccompSupported
+}
+
+func getSeccompSupported() bool {
+	if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+		// Make sure the kernel has CONFIG_SECCOMP_FILTER.
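+		// Passing a NULL filter argument never installs a filter: kernels
+		// built with CONFIG_SECCOMP_FILTER fail this probe with EFAULT,
+		// while kernels without it fail with EINVAL, so any non-EINVAL
+		// error implies filter support.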
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go new file mode 100644 index 0000000000..e348c379a9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go @@ -0,0 +1,7 @@ +// +build !linux,seccomp + +package system + +func SeccompSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go new file mode 100644 index 0000000000..84cfb7fa83 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go @@ -0,0 +1,7 @@ +// +build !seccomp + +package system + +func SeccompSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go new file mode 100644 index 0000000000..6af2b8c55e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -0,0 +1,109 @@ +package tracing + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/opentracing-contrib/go-stdlib/nethttp" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +// StartSpan starts a new span as a child of the span in context. +// If there is no span in context then this is a no-op. +// The difference from opentracing.StartSpanFromContext is that this method +// does not depend on global tracer. +func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { + parent := opentracing.SpanFromContext(ctx) + tracer := opentracing.Tracer(&opentracing.NoopTracer{}) + if parent != nil { + tracer = parent.Tracer() + opts = append(opts, opentracing.ChildOf(parent.Context())) + } + span := tracer.StartSpan(operationName, opts...) + if parent != nil { + return span, opentracing.ContextWithSpan(ctx, span) + } + return span, ctx +} + +// FinishWithError finalizes the span and sets the error if one is passed +func FinishWithError(span opentracing.Span, err error) { + if err != nil { + fields := []log.Field{ + log.String("event", "error"), + log.String("message", err.Error()), + } + if _, ok := err.(interface { + Cause() error + }); ok { + fields = append(fields, log.String("stack", fmt.Sprintf("%+v", err))) + } + span.LogFields(fields...) + ext.Error.Set(span, true) + } + span.Finish() +} + +// ContextWithSpanFromContext sets the tracing span of a context from other +// context if one is not already set. 
An alternative would be
+// context.WithoutCancel(), which would copy the context but reset ctx.Done.
+func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context {
+	// no-op if ctx already carries a span
+	if span := opentracing.SpanFromContext(ctx); span != nil {
+		return ctx
+	}
+	if span := opentracing.SpanFromContext(ctx2); span != nil {
+		return opentracing.ContextWithSpan(ctx, span)
+	}
+	return ctx
+}
+
+// DefaultTransport is an http.RoundTripper that traces requests whose
+// context carries a span.
+var DefaultTransport http.RoundTripper = &Transport{
+	RoundTripper: &nethttp.Transport{RoundTripper: http.DefaultTransport},
+}
+
+// DefaultClient is an http.Client built on DefaultTransport.
+var DefaultClient = &http.Client{
+	Transport: DefaultTransport,
+}
+
+// Transport wraps an http.RoundTripper, starting a client span for every
+// request whose context already carries one.
+type Transport struct {
+	http.RoundTripper
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	span := opentracing.SpanFromContext(req.Context())
+	if span == nil { // no tracer connected with either request or transport
+		return t.RoundTripper.RoundTrip(req)
+	}
+
+	req, tracer := nethttp.TraceRequest(span.Tracer(), req)
+
+	resp, err := t.RoundTripper.RoundTrip(req)
+	if err != nil {
+		tracer.Finish()
+		return resp, err
+	}
+
+	if req.Method == "HEAD" {
+		tracer.Finish()
+	} else {
+		resp.Body = closeTracker{resp.Body, tracer.Finish}
+	}
+
+	return resp, err
+}
+
+type closeTracker struct {
+	io.ReadCloser
+	finish func()
+}
+
+func (c closeTracker) Close() error {
+	err := c.ReadCloser.Close()
+	c.finish()
+	return err
+}
diff --git a/vendor/github.com/moby/buildkit/vendor.conf b/vendor/github.com/moby/buildkit/vendor.conf
index 6f29ab57f0..e4068737a6 100644
--- a/vendor/github.com/moby/buildkit/vendor.conf
+++ b/vendor/github.com/moby/buildkit/vendor.conf
@@ -6,7 +6,7 @@ github.com/davecgh/go-spew v1.1.0
 github.com/pmezard/go-difflib v1.0.0
 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
-github.com/containerd/containerd e1428ef05460da40720d622c803262e6fc8d3477
+github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
 github.com/sirupsen/logrus v1.0.0
@@ -23,7 +23,7 @@ github.com/Microsoft/go-winio v0.4.7
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/opencontainers/runtime-spec v1.0.1
 github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
-github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
+github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@@ -36,11 +36,10 @@ github.com/docker/go-units v0.3.1
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
-github.com/BurntSushi/locker a6e239ea1c69bff1cfdb20c4b73dadf52f784b6a
 github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
 github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
-github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
+github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
 github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
 github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
 github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
diff --git a/vendor/github.com/moby/buildkit/worker/filter.go
b/vendor/github.com/moby/buildkit/worker/filter.go new file mode 100644 index 0000000000..c94a6265f3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/filter.go @@ -0,0 +1,33 @@ +package worker + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +func adaptWorker(w Worker) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return w.ID(), len(w.ID()) > 0 + case "labels": + return checkMap(fieldpath[1:], w.Labels()) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go new file mode 100644 index 0000000000..9aa6af4167 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/result.go @@ -0,0 +1,40 @@ +package worker + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/solver" +) + +func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result { + return &workerRefResult{&WorkerRef{ImmutableRef: ref, Worker: worker}} +} + +type WorkerRef struct { + ImmutableRef cache.ImmutableRef + Worker Worker +} + +func (wr *WorkerRef) ID() string { + refID := "" + if wr.ImmutableRef != nil { + refID = wr.ImmutableRef.ID() + } + return wr.Worker.ID() + "::" + refID +} + +type workerRefResult struct { + *WorkerRef +} + +func (r *workerRefResult) Release(ctx context.Context) error { + if r.ImmutableRef == nil { + return nil + } + return r.ImmutableRef.Release(ctx) +} + +func (r *workerRefResult) Sys() interface{} { + return r.WorkerRef +} diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go new file mode 100644 index 0000000000..84fec1f8d4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -0,0 +1,41 @@ +package worker + +import ( + "context" + "io" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" +) + +type Worker interface { + // ID needs to be unique in the cluster + ID() string + Labels() map[string]string + LoadRef(id string) (cache.ImmutableRef, error) + // ResolveOp resolves Vertex.Sys() to Op implementation. + ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) + ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) + // Exec is similar to executor.Exec but without []mount.Mount + Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error + DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) + Exporter(name string) (exporter.Exporter, error) + Prune(ctx context.Context, ch chan client.UsageInfo) error + GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) + FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) +} + +// Pre-defined label keys +const ( + labelPrefix = "org.mobyproject.buildkit.worker." 
+ LabelOS = labelPrefix + "os" // GOOS + LabelArch = labelPrefix + "arch" // GOARCH + LabelExecutor = labelPrefix + "executor" // "oci" or "containerd" + LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) + LabelHostname = labelPrefix + "hostname" +) diff --git a/vendor/github.com/moby/buildkit/worker/workercontroller.go b/vendor/github.com/moby/buildkit/worker/workercontroller.go new file mode 100644 index 0000000000..2e52006ca4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/workercontroller.go @@ -0,0 +1,60 @@ +package worker + +import ( + "sync" + + "github.com/containerd/containerd/filters" + "github.com/pkg/errors" +) + +// Controller holds worker instances. +// Currently, only local workers are supported. +type Controller struct { + // TODO: define worker interface and support remote ones + workers sync.Map + defaultID string +} + +// Add adds a local worker +func (c *Controller) Add(w Worker) error { + c.workers.Store(w.ID(), w) + if c.defaultID == "" { + c.defaultID = w.ID() + } + return nil +} + +// List lists workers +func (c *Controller) List(filterStrings ...string) ([]Worker, error) { + filter, err := filters.ParseAll(filterStrings...) + if err != nil { + return nil, err + } + var workers []Worker + c.workers.Range(func(k, v interface{}) bool { + w := v.(Worker) + if filter.Match(adaptWorker(w)) { + workers = append(workers, w) + } + return true + }) + return workers, nil +} + +// GetDefault returns the default local worker +func (c *Controller) GetDefault() (Worker, error) { + if c.defaultID == "" { + return nil, errors.Errorf("no default worker") + } + return c.Get(c.defaultID) +} + +func (c *Controller) Get(id string) (Worker, error) { + v, ok := c.workers.Load(id) + if !ok { + return nil, errors.Errorf("worker %s not found", id) + } + return v.(Worker), nil +} + +// TODO: add Get(Constraint) (*Worker, error) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 3cae4fd8d9..b1c4762fe2 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -141,9 +141,10 @@ type Config struct { // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores // for a process. Valid values are between the range [-1000, '1000'], where processes with - // higher scores are preferred for being killed. + // higher scores are preferred for being killed. If it is unset then we don't touch the current + // value. 
// More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ - OomScoreAdj int `json:"oom_score_adj"` + OomScoreAdj *int `json:"oom_score_adj,omitempty"` // UidMappings is an array of User ID mappings for User Namespaces UidMappings []IDMap `json:"uid_mappings"` diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c index 2c69cee5d6..a4cd1399d9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c @@ -505,7 +505,8 @@ void join_namespaces(char *nslist) ns->fd = fd; ns->ns = nsflag(namespace); - strncpy(ns->path, path, PATH_MAX); + strncpy(ns->path, path, PATH_MAX - 1); + ns->path[PATH_MAX - 1] = '\0'; } while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL); /* @@ -678,17 +679,15 @@ void nsexec(void) /* * Enable setgroups(2) if we've been asked to. But we also * have to explicitly disable setgroups(2) if we're - * creating a rootless container (this is required since - * Linux 3.19). + * creating a rootless container for single-entry mapping. + * i.e. config.is_setgroup == false. + * (this is required since Linux 3.19). + * + * For rootless multi-entry mapping, config.is_setgroup shall be true and + * newuidmap/newgidmap shall be used. */ - if (config.is_rootless && config.is_setgroup) { - kill(child, SIGKILL); - bail("cannot allow setgroup in an unprivileged user namespace setup"); - } - if (config.is_setgroup) - update_setgroups(child, SETGROUPS_ALLOW); - if (config.is_rootless) + if (config.is_rootless && !config.is_setgroup) update_setgroups(child, SETGROUPS_DENY); /* Set up mappings. */ @@ -809,25 +808,30 @@ void nsexec(void) if (config.namespaces) join_namespaces(config.namespaces); - /* - * Unshare all of the namespaces. Now, it should be noted that this - * ordering might break in the future (especially with rootless - * containers). But for now, it's not possible to split this into - * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues. - * - * Note that we don't merge this with clone() because there were - * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID) - * was broken, so we'll just do it the long way anyway. - */ - if (unshare(config.cloneflags) < 0) - bail("failed to unshare namespaces"); - /* * Deal with user namespaces first. They are quite special, as they * affect our ability to unshare other namespaces and are used as * context for privilege checks. + * + * We don't unshare all namespaces in one go. The reason for this + * is that, while the kernel documentation may claim otherwise, + * there are certain cases where unsharing all namespaces at once + * will result in namespace objects being owned incorrectly. + * Ideally we should just fix these kernel bugs, but it's better to + * be safe than sorry, and fix them separately. + * + * A specific case of this is that the SELinux label of the + * internal kern-mount that mqueue uses will be incorrect if the + * UTS namespace is cloned before the USER namespace is mapped. + * I've also heard of similar problems with the network namespace + * in some scenarios. This also mirrors how LXC deals with this + * problem. */ if (config.cloneflags & CLONE_NEWUSER) { + if (unshare(CLONE_NEWUSER) < 0) + bail("failed to unshare user namespace"); + config.cloneflags &= ~CLONE_NEWUSER; + /* * We don't have the privileges to do any mapping here (see the * clone_parent rant). 
So signal our parent to hook us up. @@ -853,8 +857,21 @@ void nsexec(void) if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0) bail("failed to set process as dumpable"); } + + /* Become root in the namespace proper. */ + if (setresuid(0, 0, 0) < 0) + bail("failed to become root in user namespace"); } + /* + * Unshare all of the namespaces. Note that we don't merge this + * with clone() because there were some old kernel versions where + * clone(CLONE_PARENT | CLONE_NEWPID) was broken, so we'll just do + * it the long way. + */ + if (unshare(config.cloneflags) < 0) + bail("failed to unshare namespaces"); + /* * TODO: What about non-namespace clone flags that we're dropping here? * diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 5f124cd8bb..8d353d984b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -3,13 +3,12 @@ package system import ( - "bufio" - "fmt" "os" "os/exec" "syscall" // only for exec "unsafe" + "github.com/opencontainers/runc/libcontainer/user" "golang.org/x/sys/unix" ) @@ -102,34 +101,43 @@ func Setctty() error { } // RunningInUserNS detects whether we are currently running in a user namespace. -// Copied from github.com/lxc/lxd/shared/util.go +// Originally copied from github.com/lxc/lxd/shared/util.go func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") + uidmap, err := user.CurrentProcessUIDMap() if err != nil { // This kernel-provided file only exists if user namespaces are supported return false } - defer file.Close() + return UIDMapInUserNS(uidmap) +} - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) +func UIDMapInUserNS(uidmap []user.IDMap) bool { /* * We assume we are in the initial user namespace if we have a full * range - 4294967295 uids starting at uid 0. 
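+	 * (for example, /proc/self/uid_map in the initial user namespace
+	 * reads "0 0 4294967295")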
*/ - if a == 0 && b == 0 && c == 4294967295 { + if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { return false } return true } +// GetParentNSeuid returns the euid within the parent user namespace +func GetParentNSeuid() int { + euid := os.Geteuid() + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return euid + } + for _, um := range uidmap { + if um.ID <= euid && euid <= um.ID+um.Count-1 { + return um.ParentID + euid - um.ID + } + } + return euid +} + // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go index e7cfd62b29..b94be74a66 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go @@ -2,8 +2,26 @@ package system +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + // RunningInUserNS is a stub for non-Linux systems // Always returns false func RunningInUserNS() bool { return false } + +// UIDMapInUserNS is a stub for non-Linux systems +// Always returns false +func UIDMapInUserNS(uidmap []user.IDMap) bool { + return false +} + +// GetParentNSeuid returns the euid within the parent user namespace +// Always returns os.Geteuid on non-linux +func GetParentNSeuid() int { + return os.Geteuid() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index c45e300411..c1e634c949 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -114,3 +114,29 @@ func CurrentUser() (User, error) { func CurrentGroup() (Group, error) { return LookupGid(unix.Getgid()) } + +func CurrentUserSubUIDs() ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + return ParseSubIDFileFilter("/etc/subuid", + func(entry SubID) bool { return entry.Name == u.Name }) +} + +func CurrentGroupSubGIDs() ([]SubID, error) { + g, err := CurrentGroup() + if err != nil { + return nil, err + } + return ParseSubIDFileFilter("/etc/subgid", + func(entry SubID) bool { return entry.Name == g.Name }) +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 93414516ca..37993da833 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -75,12 +75,29 @@ func groupFromOS(g *user.Group) (Group, error) { return newGroup, nil } +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int + Count int +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int + ParentID int + Count int +} + func parseLine(line string, v ...interface{}) { - if line == "" { + parseParts(strings.Split(line, ":"), v...) 
+} + +func parseParts(parts []string, v ...interface{}) { + if len(parts) == 0 { return } - parts := strings.Split(line, ":") for i, p := range parts { // Ignore cases where we don't have enough fields to populate the arguments. // Some configuration files like to misbehave. @@ -479,3 +496,111 @@ func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int } return GetAdditionalGroups(additionalGroups, group) } + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, fmt.Errorf("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, fmt.Errorf("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE new file mode 100644 index 0000000000..c259d12907 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, opentracing-contrib +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of go-stdlib nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/README.md b/vendor/github.com/opentracing-contrib/go-stdlib/README.md
new file mode 100644
index 0000000000..139709c146
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/README.md
@@ -0,0 +1,18 @@
+# go-stdlib
+
+This repository contains OpenTracing instrumentation for packages in
+the Go standard library.
+
+For documentation on the packages,
+[check godoc](https://godoc.org/github.com/opentracing-contrib/go-stdlib/).
+
+**The APIs in the various packages are experimental and may change in
+the future. You should vendor them to avoid spurious breakage.**
+
+## Packages
+
+Instrumentation is provided for the following packages, with the
+following caveats:
+
+- **net/http**: Client and server instrumentation. *Only supported
+  with Go 1.7 and later.*
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
new file mode 100644
index 0000000000..8d33bcb63c
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
@@ -0,0 +1,301 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type contextKey int
+
+const (
+	keyTracer contextKey = iota
+)
+
+const defaultComponentName = "net/http"
+
+// Transport wraps a RoundTripper. If a request is being traced with
+// Tracer, Transport will inject the current span into the headers,
+// and set HTTP-related tags on the span.
+type Transport struct {
+	// The actual RoundTripper to use for the request. A nil
+	// RoundTripper defaults to http.DefaultTransport.
+	http.RoundTripper
+}
+
+type clientOptions struct {
+	operationName      string
+	componentName      string
+	disableClientTrace bool
+}
+
+// ClientOption controls the behavior of TraceRequest.
+type ClientOption func(*clientOptions)
+
+// OperationName returns a ClientOption that sets the operation
+// name for the client-side span.
+func OperationName(operationName string) ClientOption {
+	return func(options *clientOptions) {
+		options.operationName = operationName
+	}
+}
+
+// ComponentName returns a ClientOption that sets the component
+// name for the client-side span.
+func ComponentName(componentName string) ClientOption {
+	return func(options *clientOptions) {
+		options.componentName = componentName
+	}
+}
+
+// ClientTrace returns a ClientOption that turns on or off
+// extra instrumentation via httptrace.WithClientTrace.
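+// The client trace is enabled by default; disabling it keeps the client
+// span but drops the per-connection httptrace events (DNS, connect,
+// write, first byte).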
+func ClientTrace(enabled bool) ClientOption { + return func(options *clientOptions) { + options.disableClientTrace = !enabled + } +} + +// TraceRequest adds a ClientTracer to req, tracing the request and +// all requests caused due to redirects. When tracing requests this +// way you must also use Transport. +// +// Example: +// +// func AskGoogle(ctx context.Context) error { +// client := &http.Client{Transport: &nethttp.Transport{}} +// req, err := http.NewRequest("GET", "http://google.com", nil) +// if err != nil { +// return err +// } +// req = req.WithContext(ctx) // extend existing trace, if any +// +// req, ht := nethttp.TraceRequest(tracer, req) +// defer ht.Finish() +// +// res, err := client.Do(req) +// if err != nil { +// return err +// } +// res.Body.Close() +// return nil +// } +func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) { + opts := &clientOptions{} + for _, opt := range options { + opt(opts) + } + ht := &Tracer{tr: tr, opts: opts} + ctx := req.Context() + if !opts.disableClientTrace { + ctx = httptrace.WithClientTrace(ctx, ht.clientTrace()) + } + req = req.WithContext(context.WithValue(ctx, keyTracer, ht)) + return req, ht +} + +type closeTracker struct { + io.ReadCloser + sp opentracing.Span +} + +func (c closeTracker) Close() error { + err := c.ReadCloser.Close() + c.sp.LogFields(log.String("event", "ClosedBody")) + c.sp.Finish() + return err +} + +// RoundTrip implements the RoundTripper interface. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + tracer, ok := req.Context().Value(keyTracer).(*Tracer) + if !ok { + return rt.RoundTrip(req) + } + + tracer.start(req) + + ext.HTTPMethod.Set(tracer.sp, req.Method) + ext.HTTPUrl.Set(tracer.sp, req.URL.String()) + + carrier := opentracing.HTTPHeadersCarrier(req.Header) + tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier) + resp, err := rt.RoundTrip(req) + + if err != nil { + tracer.sp.Finish() + return resp, err + } + ext.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode)) + if req.Method == "HEAD" { + tracer.sp.Finish() + } else { + resp.Body = closeTracker{resp.Body, tracer.sp} + } + return resp, nil +} + +// Tracer holds tracing details for one HTTP request. +type Tracer struct { + tr opentracing.Tracer + root opentracing.Span + sp opentracing.Span + opts *clientOptions +} + +func (h *Tracer) start(req *http.Request) opentracing.Span { + if h.root == nil { + parent := opentracing.SpanFromContext(req.Context()) + var spanctx opentracing.SpanContext + if parent != nil { + spanctx = parent.Context() + } + operationName := h.opts.operationName + if operationName == "" { + operationName = "HTTP Client" + } + root := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx)) + h.root = root + } + + ctx := h.root.Context() + h.sp = h.tr.StartSpan("HTTP "+req.Method, opentracing.ChildOf(ctx)) + ext.SpanKindRPCClient.Set(h.sp) + + componentName := h.opts.componentName + if componentName == "" { + componentName = defaultComponentName + } + ext.Component.Set(h.sp, componentName) + + return h.sp +} + +// Finish finishes the span of the traced request. +func (h *Tracer) Finish() { + if h.root != nil { + h.root.Finish() + } +} + +// Span returns the root span of the traced request. This function +// should only be called after the request has been executed. 
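+// Until the traced request has gone through RoundTrip, it returns nil,
+// because the root span is created lazily by the transport.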
+func (h *Tracer) Span() opentracing.Span { + return h.root +} + +func (h *Tracer) clientTrace() *httptrace.ClientTrace { + return &httptrace.ClientTrace{ + GetConn: h.getConn, + GotConn: h.gotConn, + PutIdleConn: h.putIdleConn, + GotFirstResponseByte: h.gotFirstResponseByte, + Got100Continue: h.got100Continue, + DNSStart: h.dnsStart, + DNSDone: h.dnsDone, + ConnectStart: h.connectStart, + ConnectDone: h.connectDone, + WroteHeaders: h.wroteHeaders, + Wait100Continue: h.wait100Continue, + WroteRequest: h.wroteRequest, + } +} + +func (h *Tracer) getConn(hostPort string) { + ext.HTTPUrl.Set(h.sp, hostPort) + h.sp.LogFields(log.String("event", "GetConn")) +} + +func (h *Tracer) gotConn(info httptrace.GotConnInfo) { + h.sp.SetTag("net/http.reused", info.Reused) + h.sp.SetTag("net/http.was_idle", info.WasIdle) + h.sp.LogFields(log.String("event", "GotConn")) +} + +func (h *Tracer) putIdleConn(error) { + h.sp.LogFields(log.String("event", "PutIdleConn")) +} + +func (h *Tracer) gotFirstResponseByte() { + h.sp.LogFields(log.String("event", "GotFirstResponseByte")) +} + +func (h *Tracer) got100Continue() { + h.sp.LogFields(log.String("event", "Got100Continue")) +} + +func (h *Tracer) dnsStart(info httptrace.DNSStartInfo) { + h.sp.LogFields( + log.String("event", "DNSStart"), + log.String("host", info.Host), + ) +} + +func (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) { + fields := []log.Field{log.String("event", "DNSDone")} + for _, addr := range info.Addrs { + fields = append(fields, log.String("addr", addr.String())) + } + if info.Err != nil { + fields = append(fields, log.Error(info.Err)) + } + h.sp.LogFields(fields...) +} + +func (h *Tracer) connectStart(network, addr string) { + h.sp.LogFields( + log.String("event", "ConnectStart"), + log.String("network", network), + log.String("addr", addr), + ) +} + +func (h *Tracer) connectDone(network, addr string, err error) { + if err != nil { + h.sp.LogFields( + log.String("message", "ConnectDone"), + log.String("network", network), + log.String("addr", addr), + log.String("event", "error"), + log.Error(err), + ) + } else { + h.sp.LogFields( + log.String("event", "ConnectDone"), + log.String("network", network), + log.String("addr", addr), + ) + } +} + +func (h *Tracer) wroteHeaders() { + h.sp.LogFields(log.String("event", "WroteHeaders")) +} + +func (h *Tracer) wait100Continue() { + h.sp.LogFields(log.String("event", "Wait100Continue")) +} + +func (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) { + if info.Err != nil { + h.sp.LogFields( + log.String("message", "WroteRequest"), + log.String("event", "error"), + log.Error(info.Err), + ) + ext.Error.Set(h.sp, true) + } else { + h.sp.LogFields(log.String("event", "WroteRequest")) + } +} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go new file mode 100644 index 0000000000..c853ca6437 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go @@ -0,0 +1,3 @@ +// Package nethttp provides OpenTracing instrumentation for the +// net/http package. 
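+//
+// client.go instruments outgoing requests (Transport, TraceRequest);
+// server.go instruments incoming ones (Middleware).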
+package nethttp
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
new file mode 100644
index 0000000000..2b31415e7e
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
@@ -0,0 +1,96 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"net/http"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+)
+
+type statusCodeTracker struct {
+	http.ResponseWriter
+	status int
+}
+
+func (w *statusCodeTracker) WriteHeader(status int) {
+	w.status = status
+	w.ResponseWriter.WriteHeader(status)
+}
+
+type mwOptions struct {
+	opNameFunc    func(r *http.Request) string
+	componentName string
+}
+
+// MWOption controls the behavior of the Middleware.
+type MWOption func(*mwOptions)
+
+// OperationNameFunc returns a MWOption that uses the given function f
+// to generate the operation name for each server-side span.
+func OperationNameFunc(f func(r *http.Request) string) MWOption {
+	return func(options *mwOptions) {
+		options.opNameFunc = f
+	}
+}
+
+// MWComponentName returns a MWOption that sets the component name
+// for the server-side span.
+func MWComponentName(componentName string) MWOption {
+	return func(options *mwOptions) {
+		options.componentName = componentName
+	}
+}
+
+// Middleware wraps an http.Handler and traces incoming requests.
+// Additionally, it adds the span to the request's context.
+//
+// By default, the operation name of the spans is set to "HTTP {method}".
+// This can be overridden with options.
+//
+// Example:
+//	http.ListenAndServe("localhost:80", nethttp.Middleware(tracer, http.DefaultServeMux))
+//
+// The options allow fine-tuning the behavior of the middleware.
+//
+// Example:
+//	mw := nethttp.Middleware(
+//		tracer,
+//		http.DefaultServeMux,
+//		nethttp.OperationNameFunc(func(r *http.Request) string {
+//			return "HTTP " + r.Method + ":/api/customers"
+//		}),
+//	)
+func Middleware(tr opentracing.Tracer, h http.Handler, options ...MWOption) http.Handler {
+	opts := mwOptions{
+		opNameFunc: func(r *http.Request) string {
+			return "HTTP " + r.Method
+		},
+	}
+	for _, opt := range options {
+		opt(&opts)
+	}
+	fn := func(w http.ResponseWriter, r *http.Request) {
+		ctx, _ := tr.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header))
+		sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx))
+		ext.HTTPMethod.Set(sp, r.Method)
+		ext.HTTPUrl.Set(sp, r.URL.String())
+
+		// set component name, use "net/http" if caller does not specify
+		componentName := opts.componentName
+		if componentName == "" {
+			componentName = defaultComponentName
+		}
+		ext.Component.Set(sp, componentName)
+
+		w = &statusCodeTracker{w, 200}
+		r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp))
+
+		h.ServeHTTP(w, r)
+
+		ext.HTTPStatusCode.Set(sp, uint16(w.(*statusCodeTracker).status))
+		sp.Finish()
+	}
+	return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go
index 39bfdfee5b..cdd80ec9a7 100644
--- a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go
+++ b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go
@@ -9,5 +9,12 @@ import (
 func chtimes(path string, un int64) error {
 	mtime := time.Unix(0, un)
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+	if fi.Mode()&os.ModeSymlink != 0 {
+		return nil
+	}
 	return os.Chtimes(path, mtime, mtime)
 }
diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go
index 6125ef73af..340a0e48a4 100644
--- a/vendor/github.com/tonistiigi/fsutil/diff.go
+++ b/vendor/github.com/tonistiigi/fsutil/diff.go
@@ -1,10 +1,9 @@
 package fsutil
 
 import (
+	"context"
 	"hash"
 	"os"
-
-	"golang.org/x/net/context"
 )
 
 type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error
diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go
index c7c9788e85..2722cef4ac 100644
--- a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go
+++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go
@@ -1,10 +1,10 @@
 package fsutil
 
 import (
+	"context"
 	"os"
 	"strings"
 
-	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 )
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go
index e2d034c75e..aa1974f243 100644
--- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go
@@ -1,6 +1,7 @@
 package fsutil
 
 import (
+	"context"
 	"hash"
 	"io"
 	"os"
@@ -9,9 +10,8 @@ import (
 	"sync"
 	"time"
 
-	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 )
@@ -80,9 +80,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
 		}
 	}()
-	p = filepath.FromSlash(p)
-
-	destPath := filepath.Join(dw.dest, p)
+	destPath := filepath.Join(dw.dest, filepath.FromSlash(p))
 
 	if kind == ChangeKindDelete {
 		// todo: no need to validate if diff is trusted but is it always?
@@ -102,8 +100,10 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er return errors.Errorf("%s invalid change without stat information", p) } + statCopy := *stat + if dw.filter != nil { - if ok := dw.filter(stat); !ok { + if ok := dw.filter(&statCopy); !ok { return nil } } @@ -122,7 +122,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er } if oldFi != nil && fi.IsDir() && oldFi.IsDir() { - if err := rewriteMetadata(destPath, stat); err != nil { + if err := rewriteMetadata(destPath, &statCopy); err != nil { return errors.Wrapf(err, "error setting dir metadata for %s", destPath) } return nil @@ -141,16 +141,16 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er return errors.Wrapf(err, "failed to create dir %s", newPath) } case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: - if err := handleTarTypeBlockCharFifo(newPath, stat); err != nil { + if err := handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { return errors.Wrapf(err, "failed to create device %s", newPath) } case fi.Mode()&os.ModeSymlink != 0: - if err := os.Symlink(stat.Linkname, newPath); err != nil { + if err := os.Symlink(statCopy.Linkname, newPath); err != nil { return errors.Wrapf(err, "failed to symlink %s", newPath) } - case stat.Linkname != "": - if err := os.Link(filepath.Join(dw.dest, stat.Linkname), newPath); err != nil { - return errors.Wrapf(err, "failed to link %s to %s", newPath, stat.Linkname) + case statCopy.Linkname != "": + if err := os.Link(filepath.Join(dw.dest, statCopy.Linkname), newPath); err != nil { + return errors.Wrapf(err, "failed to link %s to %s", newPath, statCopy.Linkname) } default: isRegularFile = true @@ -170,7 +170,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er } } - if err := rewriteMetadata(newPath, stat); err != nil { + if err := rewriteMetadata(newPath, &statCopy); err != nil { return errors.Wrapf(err, "error setting metadata for %s", newPath) } @@ -272,14 +272,27 @@ func (hw *hashedWriter) Digest() digest.Digest { } type lazyFileWriter struct { - dest string - ctx context.Context - f *os.File + dest string + ctx context.Context + f *os.File + fileMode *os.FileMode } func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { if lfw.f == nil { file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows + if os.IsPermission(err) { + // retry after chmod + fi, er := os.Stat(lfw.dest) + if er == nil { + mode := fi.Mode() + lfw.fileMode = &mode + er = os.Chmod(lfw.dest, mode|0222) + if er == nil { + file, err = os.OpenFile(lfw.dest, os.O_WRONLY, 0) + } + } + } if err != nil { return 0, errors.Wrapf(err, "failed to open %s", lfw.dest) } @@ -289,10 +302,14 @@ func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { } func (lfw *lazyFileWriter) Close() error { + var err error if lfw.f != nil { - return lfw.f.Close() + err = lfw.f.Close() } - return nil + if err == nil && lfw.fileMode != nil { + err = os.Chmod(lfw.dest, *lfw.fileMode) + } + return err } func mkdev(major int64, minor int64) uint32 { diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go new file mode 100644 index 0000000000..ed4af6e816 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/followlinks.go @@ -0,0 +1,150 @@ +package fsutil + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + strings "strings" + + "github.com/pkg/errors" +) + +func FollowLinks(root string, paths 
[]string) ([]string, error) { + r := &symlinkResolver{root: root, resolved: map[string]struct{}{}} + for _, p := range paths { + if err := r.append(p); err != nil { + return nil, err + } + } + res := make([]string, 0, len(r.resolved)) + for r := range r.resolved { + res = append(res, r) + } + sort.Strings(res) + return dedupePaths(res), nil +} + +type symlinkResolver struct { + root string + resolved map[string]struct{} +} + +func (r *symlinkResolver) append(p string) error { + p = filepath.Join(".", p) + current := "." + for { + parts := strings.SplitN(p, string(filepath.Separator), 2) + current = filepath.Join(current, parts[0]) + + targets, err := r.readSymlink(current, true) + if err != nil { + return err + } + + p = "" + if len(parts) == 2 { + p = parts[1] + } + + if p == "" || targets != nil { + if _, ok := r.resolved[current]; ok { + return nil + } + } + + if targets != nil { + r.resolved[current] = struct{}{} + for _, target := range targets { + if err := r.append(filepath.Join(target, p)); err != nil { + return err + } + } + return nil + } + + if p == "" { + r.resolved[current] = struct{}{} + return nil + } + } +} + +func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, error) { + realPath := filepath.Join(r.root, p) + base := filepath.Base(p) + if allowWildcard && containsWildcards(base) { + fis, err := ioutil.ReadDir(filepath.Dir(realPath)) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to read dir %s", filepath.Dir(realPath)) + } + var out []string + for _, f := range fis { + if ok, _ := filepath.Match(base, f.Name()); ok { + res, err := r.readSymlink(filepath.Join(filepath.Dir(p), f.Name()), false) + if err != nil { + return nil, err + } + out = append(out, res...) + } + } + return out, nil + } + + fi, err := os.Lstat(realPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to lstat %s", realPath) + } + if fi.Mode()&os.ModeSymlink == 0 { + return nil, nil + } + link, err := os.Readlink(realPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to readlink %s", realPath) + } + link = filepath.Clean(link) + if filepath.IsAbs(link) { + return []string{link}, nil + } + return []string{ + filepath.Join(string(filepath.Separator), filepath.Join(filepath.Dir(p), link)), + }, nil +} + +func containsWildcards(name string) bool { + isWindows := runtime.GOOS == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +// dedupePaths expects input as a sorted list +func dedupePaths(in []string) []string { + out := make([]string, 0, len(in)) + var last string + for _, s := range in { + // if one of the paths is root there is no filter + if s == "." 
{ + return nil + } + if strings.HasPrefix(s, last+string(filepath.Separator)) { + continue + } + out = append(out, s) + last = s + } + return out +} diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go index 5867c984ee..14ccb6c7bf 100644 --- a/vendor/github.com/tonistiigi/fsutil/receive.go +++ b/vendor/github.com/tonistiigi/fsutil/receive.go @@ -1,12 +1,12 @@ package fsutil import ( + "context" "io" "os" "sync" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go index d3cda0bb64..61f6170a4c 100644 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -1,13 +1,13 @@ package fsutil import ( + "context" "io" "os" "path/filepath" "sync" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go index d05a42dbed..aa50991455 100644 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -1,6 +1,7 @@ package fsutil import ( + "context" "os" "path/filepath" "runtime" @@ -9,13 +10,15 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/pkg/errors" - "golang.org/x/net/context" ) type WalkOpt struct { IncludePatterns []string ExcludePatterns []string - Map func(*Stat) bool + // FollowPaths contains symlinks that are resolved into include patterns + // before performing the fs walk + FollowPaths []string + Map func(*Stat) bool } func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { @@ -39,8 +42,25 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err } } + var includePatterns []string + if opt != nil && opt.IncludePatterns != nil { + includePatterns = make([]string, len(opt.IncludePatterns)) + for k := range opt.IncludePatterns { + includePatterns[k] = filepath.Clean(opt.IncludePatterns[k]) + } + } + if opt != nil && opt.FollowPaths != nil { + targets, err := FollowLinks(p, opt.FollowPaths) + if err != nil { + return err + } + if targets != nil { + includePatterns = append(includePatterns, targets...) 
+ includePatterns = dedupePaths(includePatterns) + } + } + var lastIncludedDir string - var includePatternPrefixes []string seenFiles := make(map[uint64]string) return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) { @@ -66,34 +86,34 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err } if opt != nil { - if opt.IncludePatterns != nil { - if includePatternPrefixes == nil { - includePatternPrefixes = patternPrefixes(opt.IncludePatterns) - } - matched := false + if includePatterns != nil { + skip := false if lastIncludedDir != "" { if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) { - matched = true + skip = true } } - if !matched { - for _, p := range opt.IncludePatterns { - if m, _ := filepath.Match(p, path); m { + + if !skip { + matched := false + partial := true + for _, p := range includePatterns { + if ok, p := matchPrefix(p, path); ok { matched = true - break + if !p { + partial = false + break + } } } - if matched && fi.IsDir() { - lastIncludedDir = path - } - } - if !matched { - if !fi.IsDir() { - return nil - } else { - if noPossiblePrefixMatch(path, includePatternPrefixes) { + if !matched { + if fi.IsDir() { return filepath.SkipDir } + return nil + } + if !partial && fi.IsDir() { + lastIncludedDir = path } } } @@ -131,13 +151,13 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err stat := &Stat{ Path: path, Mode: uint32(fi.Mode()), - Size_: fi.Size(), ModTime: fi.ModTime().UnixNano(), } setUnixOpt(fi, stat, path, seenFiles) if !fi.IsDir() { + stat.Size_ = fi.Size() if fi.Mode()&os.ModeSymlink != 0 { link, err := os.Readlink(origpath) if err != nil { @@ -199,29 +219,28 @@ func (s *StatInfo) Sys() interface{} { return s.Stat } -func patternPrefixes(patterns []string) []string { - pfxs := make([]string, 0, len(patterns)) - for _, ptrn := range patterns { - idx := strings.IndexFunc(ptrn, func(ch rune) bool { - return ch == '*' || ch == '?' || ch == '[' || ch == '\\' - }) - if idx == -1 { - idx = len(ptrn) - } - pfxs = append(pfxs, ptrn[:idx]) +func matchPrefix(pattern, name string) (bool, bool) { + count := strings.Count(name, string(filepath.Separator)) + partial := false + if strings.Count(pattern, string(filepath.Separator)) > count { + pattern = trimUntilIndex(pattern, string(filepath.Separator), count) + partial = true } - return pfxs + m, _ := filepath.Match(pattern, name) + return m, partial } -func noPossiblePrefixMatch(p string, pfxs []string) bool { - for _, pfx := range pfxs { - chk := p - if len(pfx) < len(p) { - chk = p[:len(pfx)] - } - if strings.HasPrefix(pfx, chk) { - return false +func trimUntilIndex(str, sep string, count int) string { + s := str + i := 0 + c := 0 + for { + idx := strings.Index(s, sep) + s = s[idx+len(sep):] + i += idx + len(sep) + c++ + if c > count { + return str[:i-len(sep)] } } - return true }
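For reference, the reworked walker above is driven through fsutil.Walk: FollowPaths entries are resolved via FollowLinks into extra include patterns before the walk starts, and matchPrefix lets the parent directories of a deeper pattern match partially so the walk can descend into them. A minimal sketch of the call (illustrative only; the context directory "/ctx", the patterns, and the symlink name "link-to-src" are placeholders, not part of this patch):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/tonistiigi/fsutil"
)

func main() {
	// Walk the directory, keeping only entries matched by the include
	// patterns; the symlinked path is resolved into additional patterns
	// so its target is walked too.
	err := fsutil.Walk(context.TODO(), "/ctx", &fsutil.WalkOpt{
		IncludePatterns: []string{"src/*.go"},
		FollowPaths:     []string{"link-to-src"},
	}, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path) // each entry that survived the include filtering
		return nil
	})
	if err != nil {
		panic(err)
	}
}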