Merge pull request #37151 from tonistiigi/experimental-buildkit

Experimental BuildKit support
Tibor Vass 2018-06-12 13:31:48 -07:00 committed by GitHub
commit c752b0991e
213 changed files with 38196 additions and 243 deletions

View file

@@ -31,6 +31,7 @@ DOCKER_ENVS := \
-e DOCKER_BUILD_ARGS \
-e DOCKER_BUILD_GOGC \
-e DOCKER_BUILD_PKGS \
-e DOCKER_BUILDKIT \
-e DOCKER_BASH_COMPLETION_PATH \
-e DOCKER_CLI_PATH \
-e DOCKER_DEBUG \

View file

@@ -8,10 +8,12 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// ImageComponent provides an interface for working with images
@@ -30,24 +32,39 @@ type Backend struct {
builder Builder
fsCache *fscache.FSCache
imageComponent ImageComponent
buildkit *buildkit.Builder
}
// NewBackend creates a new build backend from components
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
}
// Build builds an image from a Source
func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
options := config.Options
useBuildKit := options.Version == types.BuilderBuildKit
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
if err != nil {
return "", err
}
build, err := b.builder.Build(ctx, config)
if err != nil {
return "", err
var build *builder.Result
if useBuildKit {
build, err = b.buildkit.Build(ctx, config)
if err != nil {
return "", err
}
} else {
build, err = b.builder.Build(ctx, config)
if err != nil {
return "", err
}
}
if build == nil {
return "", nil
}
var imageID = build.ImageID
@@ -62,19 +79,48 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
}
}
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
err = tagger.TagImages(image.ID(imageID))
if !useBuildKit {
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
size, err := b.fsCache.Prune(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
eg, ctx := errgroup.WithContext(ctx)
var fsCacheSize uint64
eg.Go(func() error {
var err error
fsCacheSize, err = b.fsCache.Prune(ctx)
if err != nil {
return errors.Wrap(err, "failed to prune fscache")
}
return nil
})
var buildCacheSize int64
eg.Go(func() error {
var err error
buildCacheSize, err = b.buildkit.Prune(ctx)
if err != nil {
return errors.Wrap(err, "failed to prune build cache")
}
return nil
})
if err := eg.Wait(); err != nil {
return nil, err
}
return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil
return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize)}, nil
}
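
PruneCache now fans out to both cache stores, the legacy fscache and the BuildKit cache, and aggregates the reclaimed sizes. A minimal, self-contained sketch of the same errgroup fan-out pattern, with illustrative function names that are not part of this PR:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// pruneAll runs two independent prune operations in parallel and sums
// the reclaimed sizes; the first error cancels the shared context and
// is returned from Wait.
func pruneAll(ctx context.Context, pruneA, pruneB func(context.Context) (uint64, error)) (uint64, error) {
	eg, ctx := errgroup.WithContext(ctx)
	var sizeA, sizeB uint64
	eg.Go(func() error {
		var err error
		sizeA, err = pruneA(ctx)
		return err
	})
	eg.Go(func() error {
		var err error
		sizeB, err = pruneB(ctx)
		return err
	})
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	return sizeA + sizeB, nil
}

func main() {
	total, err := pruneAll(context.Background(),
		func(context.Context) (uint64, error) { return 100, nil },
		func(context.Context) (uint64, error) { return 42, nil },
	)
	fmt.Println(total, err) // 142 <nil>
}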
// Cancel cancels the build by ID
func (b *Backend) Cancel(ctx context.Context, id string) error {
return b.buildkit.Cancel(ctx, id)
}
func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) {

View file

@@ -15,6 +15,8 @@ type Backend interface {
// Prune build cache
PruneCache(context.Context) (*types.BuildCachePruneReport, error)
Cancel(context.Context, string) error
}
type experimentalProvider interface {

View file

@@ -25,5 +25,6 @@ func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/build", r.postBuild, router.WithCancel),
router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel),
router.NewPostRoute("/build/cancel", r.postCancel),
}
}
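
The new route accepts the build ID as a form value (see postCancel below). A hedged client-side sketch: the /build/cancel path and the id parameter come from this diff, while the host address and plain-HTTP transport are assumptions for illustration.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// cancelBuild posts to the new /build/cancel endpoint.
func cancelBuild(host, buildID string) error {
	u := url.URL{
		Scheme:   "http",
		Host:     host, // assumed: a daemon exposed over TCP
		Path:     "/build/cancel",
		RawQuery: url.Values{"id": {buildID}}.Encode(),
	}
	resp, err := http.Post(u.String(), "application/json", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("cancel failed: %s", resp.Status)
	}
	return nil
}

func main() {
	if err := cancelBuild("localhost:2375", "mybuild-1"); err != nil {
		fmt.Println(err)
	}
}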

View file

@@ -1,6 +1,7 @@
package build // import "github.com/docker/docker/api/server/router/build"
import (
"bufio"
"bytes"
"context"
"encoding/base64"
@@ -145,10 +146,26 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options.CacheFrom = cacheFrom
}
options.SessionID = r.FormValue("session")
options.BuildID = r.FormValue("buildid")
builderVersion, err := parseVersion(r.FormValue("version"))
if err != nil {
return nil, err
}
options.Version = builderVersion
return options, nil
}
func parseVersion(s string) (types.BuilderVersion, error) {
if s == "" || s == string(types.BuilderV1) {
return types.BuilderV1, nil
}
if s == string(types.BuilderBuildKit) {
return types.BuilderBuildKit, nil
}
return "", errors.Errorf("invalid version %s", s)
}
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
report, err := br.backend.PruneCache(ctx)
if err != nil {
@@ -157,6 +174,17 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return httputils.WriteJSON(w, http.StatusOK, report)
}
func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
id := r.FormValue("id")
if id == "" {
return errors.Errorf("build ID not provided")
}
return br.backend.Cancel(ctx, id)
}
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
notVerboseBuffer = bytes.NewBuffer(nil)
@@ -165,18 +193,34 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
body := r.Body
var ww io.Writer = w
if body != nil {
// There is a possibility that output is written before the request
// body has been fully read, so we need to protect against it.
// This can be removed once
// https://github.com/golang/go/issues/15527
// https://github.com/golang/go/issues/22209
// have been fixed.
body, ww = wrapOutputBufferedUntilRequestRead(body, ww)
}
output := ioutils.NewWriteFlusher(ww)
defer output.Close()
errf := func(err error) error {
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
output.Write(notVerboseBuffer.Bytes())
}
logrus.Debugf("isflushed %v", output.Flushed())
// Do not write the error in the HTTP output if it's still empty.
// This prevents writing a 200 (OK) when there is an internal error.
if !output.Flushed() {
return err
}
_, err = w.Write(streamformatter.FormatError(err))
_, err = output.Write(streamformatter.FormatError(err))
if err != nil {
logrus.Warnf("could not write error response: %v", err)
}
@@ -205,10 +249,14 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext)
}
if buildOptions.Version == types.BuilderBuildKit && !br.daemon.HasExperimental() {
return errdefs.InvalidParameter(errors.New("buildkit is only supported with experimental mode"))
}
wantAux := versions.GreaterThanOrEqualTo(version, "1.30")
imgID, err := br.backend.Build(ctx, backend.BuildConfig{
Source: r.Body,
Source: body,
Options: buildOptions,
ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader),
})
@@ -267,3 +315,102 @@ func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter {
ProgressReaderFunc: createProgressReader,
}
}
type flusher interface {
Flush()
}
func wrapOutputBufferedUntilRequestRead(rc io.ReadCloser, out io.Writer) (io.ReadCloser, io.Writer) {
var fl flusher = &ioutils.NopFlusher{}
if f, ok := out.(flusher); ok {
fl = f
}
w := &wcf{
buf: bytes.NewBuffer(nil),
Writer: out,
flusher: fl,
}
r := bufio.NewReader(rc)
_, err := r.Peek(1)
if err != nil {
return rc, out
}
rc = &rcNotifier{
Reader: r,
Closer: rc,
notify: w.notify,
}
return rc, w
}
type rcNotifier struct {
io.Reader
io.Closer
notify func()
}
func (r *rcNotifier) Read(b []byte) (int, error) {
n, err := r.Reader.Read(b)
if err != nil {
r.notify()
}
return n, err
}
func (r *rcNotifier) Close() error {
r.notify()
return r.Closer.Close()
}
type wcf struct {
io.Writer
flusher
mu sync.Mutex
ready bool
buf *bytes.Buffer
flushed bool
}
func (w *wcf) Flush() {
w.mu.Lock()
w.flushed = true
if !w.ready {
w.mu.Unlock()
return
}
w.mu.Unlock()
w.flusher.Flush()
}
func (w *wcf) Flushed() bool {
w.mu.Lock()
b := w.flushed
w.mu.Unlock()
return b
}
func (w *wcf) Write(b []byte) (int, error) {
w.mu.Lock()
if !w.ready {
n, err := w.buf.Write(b)
w.mu.Unlock()
return n, err
}
w.mu.Unlock()
return w.Writer.Write(b)
}
func (w *wcf) notify() {
w.mu.Lock()
if !w.ready {
if w.buf.Len() > 0 {
io.Copy(w.Writer, w.buf)
}
if w.flushed {
w.flusher.Flush()
}
w.ready = true
}
w.mu.Unlock()
}
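
wcf buffers writes until notify signals that the request body has been consumed, then drains the buffer and writes through. A reduced, self-contained sketch of that buffer-until-ready idea (no flusher handling), independent of the unexported types above:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

// bufferedUntilReady mirrors the idea of wcf in a reduced form: writes
// are held in a buffer until notify() is called, after which the
// buffer is drained and writes pass straight through.
type bufferedUntilReady struct {
	mu    sync.Mutex
	ready bool
	buf   bytes.Buffer
	out   io.Writer
}

func (w *bufferedUntilReady) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.ready {
		return w.buf.Write(p)
	}
	return w.out.Write(p)
}

func (w *bufferedUntilReady) notify() {
	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.ready {
		io.Copy(w.out, &w.buf)
		w.ready = true
	}
}

func main() {
	w := &bufferedUntilReady{out: os.Stdout}
	fmt.Fprintln(w, "written before the body is read") // buffered
	w.notify()                                         // body fully read: drain
	fmt.Fprintln(w, "written after")                   // passes through
}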

View file

@@ -2,6 +2,7 @@ package system // import "github.com/docker/docker/api/server/router/system"
import (
"github.com/docker/docker/api/server/router"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/fscache"
)
@@ -11,15 +12,17 @@ type systemRouter struct {
backend Backend
cluster ClusterBackend
routes []router.Route
builder *fscache.FSCache
fscache *fscache.FSCache // legacy
builder *buildkit.Builder
}
// NewRouter initializes a new system router
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache) router.Router {
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder) router.Router {
r := &systemRouter{
backend: b,
cluster: c,
builder: fscache,
fscache: fscache,
builder: builder,
}
r.routes = []router.Route{

View file

@@ -17,6 +17,7 @@ import (
"github.com/docker/docker/pkg/ioutils"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -69,15 +70,45 @@ func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
}
func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
du, err := s.backend.SystemDiskUsage(ctx)
if err != nil {
eg, ctx := errgroup.WithContext(ctx)
var du *types.DiskUsage
eg.Go(func() error {
var err error
du, err = s.backend.SystemDiskUsage(ctx)
return err
})
var builderSize int64 // legacy
eg.Go(func() error {
var err error
builderSize, err = s.fscache.DiskUsage(ctx)
if err != nil {
return pkgerrors.Wrap(err, "error getting fscache build cache usage")
}
return nil
})
var buildCache []*types.BuildCache
eg.Go(func() error {
var err error
buildCache, err = s.builder.DiskUsage(ctx)
if err != nil {
return pkgerrors.Wrap(err, "error getting build cache usage")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
builderSize, err := s.builder.DiskUsage(ctx)
if err != nil {
return pkgerrors.Wrap(err, "error getting build cache usage")
for _, b := range buildCache {
builderSize += b.Size
}
du.BuilderSize = builderSize
du.BuildCache = buildCache
return httputils.WriteJSON(w, http.StatusOK, du)
}

View file

@@ -181,8 +181,24 @@ type ImageBuildOptions struct {
Target string
SessionID string
Platform string
// Version specifies the version of the underlying builder to use
Version BuilderVersion
// BuildID is an optional identifier that can be passed together with the
// build request. The same identifier can be used to gracefully cancel the
// build with the cancel request.
BuildID string
}
// BuilderVersion sets the version of the underlying builder to use
type BuilderVersion string
const (
// BuilderV1 is the first-generation builder in the docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is the builder based on the moby/buildkit project
BuilderBuildKit = "2"
)
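
A client opts into BuilderBuildKit by passing version=2 with the build request; an empty or "1" value maps to BuilderV1 (see parseVersion earlier in this commit). A small illustrative sketch of constructing such a request query; the buildid value here is hypothetical:

package main

import (
	"fmt"
	"net/url"
)

// buildQuery assembles the query string for a build request. This is
// an illustrative sketch, not CLI code from this PR.
func buildQuery(useBuildKit bool) string {
	v := url.Values{}
	v.Set("buildid", "build-1") // hypothetical ID, usable later with /build/cancel
	if useBuildKit {
		v.Set("version", "2") // types.BuilderBuildKit
	} else {
		v.Set("version", "1") // types.BuilderV1 (also the default when empty)
	}
	return "/build?" + v.Encode()
}

func main() {
	fmt.Println(buildQuery(true))
}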
// ImageBuildResponse holds information
// returned by a server after building
// an image.

View file

@@ -512,7 +512,8 @@ type DiskUsage struct {
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
BuilderSize int64
BuildCache []*BuildCache
BuilderSize int64 // deprecated
}
// ContainersPruneReport contains the response for Engine API:
@@ -585,3 +586,17 @@ type PushResult struct {
type BuildResult struct {
ID string
}
// BuildCache contains information about a build cache record
type BuildCache struct {
ID string
Mutable bool
InUse bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parent string
Description string
}

View file

@@ -0,0 +1,724 @@
package containerimage
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"runtime"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
ctdreference "github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/distribution"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/tracing"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/time/rate"
)
const preferLocal = true // FIXME: make this optional from the op
// SourceOpt is options for creating the image source
type SourceOpt struct {
SessionManager *session.Manager
ContentStore content.Store
CacheAccessor cache.Accessor
ReferenceStore reference.Store
DownloadManager distribution.RootFSDownloadManager
MetadataStore metadata.V2MetadataService
ImageStore image.Store
}
type imageSource struct {
SourceOpt
g flightcontrol.Group
}
// NewSource creates a new image source
func NewSource(opt SourceOpt) (source.Source, error) {
is := &imageSource{
SourceOpt: opt,
}
return is, nil
}
func (is *imageSource) ID() string {
return source.DockerImageScheme
}
func (is *imageSource) getResolver(ctx context.Context) remotes.Resolver {
return docker.NewResolver(docker.ResolverOptions{
Client: tracing.DefaultClient,
Credentials: is.getCredentialsFromSession(ctx),
})
}
func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
id := session.FromContext(ctx)
if id == "" {
return nil
}
return func(host string) (string, string, error) {
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
caller, err := is.SessionManager.Get(timeoutCtx, id)
if err != nil {
return "", "", err
}
return auth.CredentialsFunc(tracing.ContextWithSpanFromContext(context.TODO(), ctx), caller)(host)
}
}
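
getCredentialsFromSession binds a per-host credentials callback to the request's session ID, giving each lookup its own short timeout detached from the build context. A stdlib-only sketch of that closure pattern, with a lookup function standing in for SessionManager.Get plus auth.CredentialsFunc:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// credentialsFor returns a per-host credentials callback bound to a
// session ID. A nil return means no session, so pulls stay anonymous.
func credentialsFor(sessionID string, lookup func(ctx context.Context, session, host string) (user, secret string, err error)) func(string) (string, string, error) {
	if sessionID == "" {
		return nil
	}
	return func(host string) (string, string, error) {
		// Each lookup gets its own timeout, detached from the build context.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		return lookup(ctx, sessionID, host)
	}
}

func main() {
	creds := credentialsFor("sess-1", func(_ context.Context, _, host string) (string, string, error) {
		if host == "registry-1.docker.io" {
			return "user", "token", nil
		}
		return "", "", errors.New("unknown host")
	})
	fmt.Println(creds("registry-1.docker.io"))
}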
func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
ref, err := distreference.ParseNormalizedNamed(refStr)
if err != nil {
return nil, err
}
dgst, err := is.ReferenceStore.Get(ref)
if err != nil {
return nil, err
}
img, err := is.ImageStore.Get(image.ID(dgst))
if err != nil {
return nil, err
}
return img.RawJSON(), nil
}
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
if preferLocal {
dt, err := is.resolveLocal(ref)
if err == nil {
return "", dt, nil
}
}
type t struct {
dgst digest.Digest
dt []byte
}
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, "")
if err != nil {
return nil, err
}
return &t{dgst: dgst, dt: dt}, nil
})
if err != nil {
return "", nil, err
}
typed := res.(*t)
return typed.dgst, typed.dt, nil
}
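
The flightcontrol.Group field collapses concurrent resolutions of the same ref into a single execution. A sketch of that deduplication, using the same Do signature this PR's vendored flightcontrol package exposes (later buildkit versions changed the API):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/moby/buildkit/util/flightcontrol"
)

func main() {
	var g flightcontrol.Group
	var calls int

	resolve := func(ctx context.Context) (interface{}, error) {
		calls++ // single-flight per key: no concurrent executions overlap
		time.Sleep(10 * time.Millisecond)
		return "sha256:config-digest", nil
	}

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res, _ := g.Do(context.Background(), "docker.io/library/busybox:latest", resolve)
			_ = res
		}()
	}
	wg.Wait()
	fmt.Println("executions:", calls) // typically 1: callers shared one flight
}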
func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
imageIdentifier, ok := id.(*source.ImageIdentifier)
if !ok {
return nil, errors.Errorf("invalid image identifier %v", id)
}
p := &puller{
src: imageIdentifier,
is: is,
resolver: is.getResolver(ctx),
}
return p, nil
}
type puller struct {
is *imageSource
resolveOnce sync.Once
resolveLocalOnce sync.Once
src *source.ImageIdentifier
desc ocispec.Descriptor
ref string
resolveErr error
resolver remotes.Resolver
config []byte
}
func (p *puller) mainManifestKey(dgst digest.Digest) (digest.Digest, error) {
dt, err := json.Marshal(struct {
Digest digest.Digest
OS string
Arch string
}{
Digest: p.desc.Digest,
OS: runtime.GOOS,
Arch: runtime.GOARCH,
})
if err != nil {
return "", err
}
return digest.FromBytes(dt), nil
}
func (p *puller) resolveLocal() {
p.resolveLocalOnce.Do(func() {
dgst := p.src.Reference.Digest()
if dgst != "" {
info, err := p.is.ContentStore.Info(context.TODO(), dgst)
if err == nil {
p.ref = p.src.Reference.String()
desc := ocispec.Descriptor{
Size: info.Size,
Digest: dgst,
}
ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
if err == nil {
mt, err := imageutil.DetectManifestMediaType(ra)
if err == nil {
desc.MediaType = mt
p.desc = desc
}
}
}
}
if preferLocal {
dt, err := p.is.resolveLocal(p.src.Reference.String())
if err == nil {
p.config = dt
}
}
})
}
func (p *puller) resolve(ctx context.Context) error {
p.resolveOnce.Do(func() {
resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
if err != nil {
p.resolveErr = err
resolveProgressDone(err)
return
}
if p.desc.Digest == "" && p.config == nil {
origRef, desc, err := p.resolver.Resolve(ctx, ref.String())
if err != nil {
p.resolveErr = err
resolveProgressDone(err)
return
}
p.desc = desc
p.ref = origRef
}
// Schema 1 manifests cannot be resolved to an image config
// since the conversion must take place after all the content
// has been read.
// It may be possible to have a mapping between schema 1 manifests
// and the schema 2 manifests they are converted to.
if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
ref, err := distreference.WithDigest(ref, p.desc.Digest)
if err != nil {
p.resolveErr = err
resolveProgressDone(err)
return
}
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String())
if err != nil {
p.resolveErr = err
resolveProgressDone(err)
return
}
p.config = dt
}
resolveProgressDone(nil)
})
return p.resolveErr
}
func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
p.resolveLocal()
if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.desc.Digest)
if err != nil {
return "", false, err
}
return dgst.String(), false, nil
}
if p.config != nil {
return cacheKeyFromConfig(p.config).String(), true, nil
}
if err := p.resolve(ctx); err != nil {
return "", false, err
}
if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.desc.Digest)
if err != nil {
return "", false, err
}
return dgst.String(), false, nil
}
return cacheKeyFromConfig(p.config).String(), true, nil
}
func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
p.resolveLocal()
if err := p.resolve(ctx); err != nil {
return nil, err
}
if p.config != nil {
img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
if err == nil {
if len(img.RootFS.DiffIDs) == 0 {
return nil, nil
}
ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(img.RootFS.ChainID()), cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
if err != nil {
return nil, err
}
return ref, nil
}
}
ongoing := newJobs(p.ref)
pctx, stopProgress := context.WithCancel(ctx)
pw, _, ctx := progress.FromContext(ctx)
defer pw.Close()
progressDone := make(chan struct{})
go func() {
showProgress(pctx, ongoing, p.is.ContentStore, pw)
close(progressDone)
}()
defer func() {
<-progressDone
}()
fetcher, err := p.resolver.Fetcher(ctx, p.ref)
if err != nil {
stopProgress()
return nil, err
}
var (
schema1Converter *schema1.Converter
handlers []images.Handler
)
if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
handlers = append(handlers, schema1Converter)
// TODO: Optimize to do dispatch and integrate pulling with download manager,
// leverage existing blob mapping and layer storage
} else {
// TODO: need a wrapper snapshot interface that combines content
// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
// or 2) cachemanager should manage the contentstore
handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
default:
return nil, images.ErrSkipDesc
}
ongoing.add(desc)
return nil, nil
}))
// Get all the children for a descriptor
childrenHandler := images.ChildrenHandler(p.is.ContentStore)
// Set any children labels for that content
childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
// Filter the children by the platform
childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())
handlers = append(handlers,
remotes.FetchHandler(p.is.ContentStore, fetcher),
childrenHandler,
)
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil {
stopProgress()
return nil, err
}
defer stopProgress()
if schema1Converter != nil {
p.desc, err = schema1Converter.Convert(ctx)
if err != nil {
return nil, err
}
}
mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
if err != nil {
return nil, err
}
config, err := images.Config(ctx, p.is.ContentStore, p.desc, platforms.Default())
if err != nil {
return nil, err
}
dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
if err != nil {
return nil, err
}
var img ocispec.Image
if err := json.Unmarshal(dt, &img); err != nil {
return nil, err
}
if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
return nil, errors.Errorf("invalid config for manifest")
}
pchan := make(chan pkgprogress.Progress, 10)
defer close(pchan)
go func() {
m := map[string]struct {
st time.Time
limiter *rate.Limiter
}{}
for p := range pchan {
if p.Action == "Extracting" {
st, ok := m[p.ID]
if !ok {
st.st = time.Now()
st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
m[p.ID] = st
}
var end *time.Time
if p.LastUpdate || st.limiter.Allow() {
if p.LastUpdate {
tm := time.Now()
end = &tm
}
pw.Write("extracting "+p.ID, progress.Status{
Action: "extract",
Started: &st.st,
Completed: end,
})
}
}
}
}()
if len(mfst.Layers) == 0 {
return nil, nil
}
layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))
for i, desc := range mfst.Layers {
ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: desc,
diffID: layer.DiffID(img.RootFS.DiffIDs[i]),
fetcher: fetcher,
ref: p.src.Reference,
is: p.is,
})
}
defer func() {
<-progressDone
for _, desc := range mfst.Layers {
p.is.ContentStore.Delete(context.TODO(), desc.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
if err != nil {
return nil, err
}
stopProgress()
ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
release()
if err != nil {
return nil, err
}
return ref, nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
is *imageSource
fetcher remotes.Fetcher
desc ocispec.Descriptor
diffID layer.DiffID
ref ctdreference.Spec
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
rc, err := ld.fetcher.Fetch(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
defer rc.Close()
refKey := remotes.MakeRefKey(ctx, ld.desc)
ld.is.ContentStore.Abort(ctx, refKey)
if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
ld.is.ContentStore.Abort(ctx, refKey)
return nil, 0, err
}
ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
var (
ticker = time.NewTicker(100 * time.Millisecond)
statuses = map[string]statusInfo{}
done bool
)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-ctx.Done():
done = true
}
resolved := "resolved"
if !ongoing.isResolved() {
resolved = "resolving"
}
statuses[ongoing.name] = statusInfo{
Ref: ongoing.name,
Status: resolved,
}
actives := make(map[string]statusInfo)
if !done {
active, err := cs.ListStatuses(ctx)
if err != nil {
// log.G(ctx).WithError(err).Error("active check failed")
continue
}
// update status of active entries!
for _, active := range active {
actives[active.Ref] = statusInfo{
Ref: active.Ref,
Status: "downloading",
Offset: active.Offset,
Total: active.Total,
StartedAt: active.StartedAt,
UpdatedAt: active.UpdatedAt,
}
}
}
// now, update the items in jobs that are not in active
for _, j := range ongoing.jobs() {
refKey := remotes.MakeRefKey(ctx, j.Descriptor)
if a, ok := actives[refKey]; ok {
started := j.started
pw.Write(j.Digest.String(), progress.Status{
Action: a.Status,
Total: int(a.Total),
Current: int(a.Offset),
Started: &started,
})
continue
}
if !j.done {
info, err := cs.Info(context.TODO(), j.Digest)
if err != nil {
if errdefs.IsNotFound(err) {
// pw.Write(j.Digest.String(), progress.Status{
// Action: "waiting",
// })
continue
}
} else {
j.done = true
}
if done || j.done {
started := j.started
createdAt := info.CreatedAt
pw.Write(j.Digest.String(), progress.Status{
Action: "done",
Current: int(info.Size),
Total: int(info.Size),
Completed: &createdAt,
Started: &started,
})
}
}
}
if done {
return
}
}
}
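
showProgress wakes on a ticker or on context cancellation, and still performs one final reporting pass after cancellation so the last state is reported. The loop shape, reduced to a stdlib-only sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntilDone wakes on a ticker or on context cancellation, and runs
// one final iteration after cancellation before returning.
func pollUntilDone(ctx context.Context, report func(final bool)) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		done := false
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}
		report(done)
		if done {
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	pollUntilDone(ctx, func(final bool) {
		fmt.Println("tick, final =", final)
	})
}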
// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
name string
added map[digest.Digest]job
mu sync.Mutex
resolved bool
}
type job struct {
ocispec.Descriptor
done bool
started time.Time
}
func newJobs(name string) *jobs {
return &jobs{
name: name,
added: make(map[digest.Digest]job),
}
}
func (j *jobs) add(desc ocispec.Descriptor) {
j.mu.Lock()
defer j.mu.Unlock()
if _, ok := j.added[desc.Digest]; ok {
return
}
j.added[desc.Digest] = job{
Descriptor: desc,
started: time.Now(),
}
}
func (j *jobs) jobs() []job {
j.mu.Lock()
defer j.mu.Unlock()
descs := make([]job, 0, len(j.added))
for _, j := range j.added {
descs = append(descs, j)
}
return descs
}
func (j *jobs) isResolved() bool {
j.mu.Lock()
defer j.mu.Unlock()
return j.resolved
}
type statusInfo struct {
Ref string
Status string
Offset int64
Total int64
StartedAt time.Time
UpdatedAt time.Time
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}
// cacheKeyFromConfig returns a stable digest from the image config. If the
// config is a known OCI image, the ChainID of its layers is used instead.
func cacheKeyFromConfig(dt []byte) digest.Digest {
var img ocispec.Image
err := json.Unmarshal(dt, &img)
if err != nil {
return digest.FromBytes(dt)
}
if img.RootFS.Type != "layers" {
return digest.FromBytes(dt)
}
return identity.ChainID(img.RootFS.DiffIDs)
}
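
identity.ChainID implements the OCI chain-ID recurrence: ChainID(L0) = DiffID(L0), and ChainID(L0..Ln) = Digest(ChainID(L0..Ln-1) + " " + DiffID(Ln)). A sketch that recomputes it by hand and checks the result against the library:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

// chainID recomputes the OCI chain ID by folding the recurrence over
// the ordered diff IDs.
func chainID(diffIDs []digest.Digest) digest.Digest {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		id = digest.FromString(id.String() + " " + d.String())
	}
	return id
}

func main() {
	diffs := []digest.Digest{
		digest.FromString("layer-0"),
		digest.FromString("layer-1"),
	}
	fmt.Println(chainID(diffs) == identity.ChainID(diffs)) // true
}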

View file

@@ -0,0 +1,113 @@
package snapshot
import (
"context"
"os"
"path/filepath"
"github.com/boltdb/bolt"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
if l, err := s.getLayer(key, true); err != nil {
return nil, err
} else if l != nil {
return getDiffChain(l), nil
}
id, committed := s.getGraphDriverID(key)
if !committed {
return nil, errors.Errorf("can not convert active %s to layer", key)
}
info, err := s.Stat(ctx, key)
if err != nil {
return nil, err
}
eg, gctx := errgroup.WithContext(ctx)
// TODO: add flightcontrol
var parentChainID layer.ChainID
if info.Parent != "" {
eg.Go(func() error {
diffIDs, err := s.EnsureLayer(gctx, info.Parent)
if err != nil {
return err
}
parentChainID = layer.CreateChainID(diffIDs)
return nil
})
}
tmpDir, err := ioutils.TempDir("", "docker-tarsplit")
if err != nil {
return nil, err
}
defer os.RemoveAll(tmpDir)
tarSplitPath := filepath.Join(tmpDir, "tar-split")
var diffID layer.DiffID
var size int64
eg.Go(func() error {
parent := ""
if p := info.Parent; p != "" {
if l, err := s.getLayer(p, true); err != nil {
return err
} else if l != nil {
parent, err = getGraphID(l)
if err != nil {
return err
}
} else {
parent, _ = s.getGraphDriverID(info.Parent)
}
}
diffID, size, err = s.reg.ChecksumForGraphID(id, parent, "", tarSplitPath)
return err
})
if err := eg.Wait(); err != nil {
return nil, err
}
l, err := s.reg.RegisterByGraphID(id, parentChainID, diffID, tarSplitPath, size)
if err != nil {
return nil, err
}
if err := s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(key))
b.Put(keyChainID, []byte(l.ChainID()))
return nil
}); err != nil {
return nil, err
}
s.mu.Lock()
s.refs[key] = l
s.mu.Unlock()
return getDiffChain(l), nil
}
func getDiffChain(l layer.Layer) []layer.DiffID {
if p := l.Parent(); p != nil {
return append(getDiffChain(p), l.DiffID())
}
return []layer.DiffID{l.DiffID()}
}
func getGraphID(l layer.Layer) (string, error) {
if l, ok := l.(interface {
CacheID() string
}); ok {
return l.CacheID(), nil
}
return "", errors.Errorf("couldn't access cacheID for %s", l.ChainID())
}

View file

@@ -0,0 +1,445 @@
package snapshot
import (
"context"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshots"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/layer"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
var keyParent = []byte("parent")
var keyCommitted = []byte("committed")
var keyChainID = []byte("chainid")
var keySize = []byte("size")
// Opt defines options for creating the snapshotter
type Opt struct {
GraphDriver graphdriver.Driver
LayerStore layer.Store
Root string
}
type graphIDRegistrar interface {
RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error)
Release(layer.Layer) ([]layer.Metadata, error)
checksumCalculator
}
type checksumCalculator interface {
ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error)
}
type snapshotter struct {
opt Opt
refs map[string]layer.Layer
db *bolt.DB
mu sync.Mutex
reg graphIDRegistrar
}
var _ snapshot.SnapshotterBase = &snapshotter{}
// NewSnapshotter creates a new snapshotter
func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
dbPath := filepath.Join(opt.Root, "snapshots.db")
db, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
}
reg, ok := opt.LayerStore.(graphIDRegistrar)
if !ok {
return nil, errors.Errorf("layerstore doesn't support graphID registration")
}
s := &snapshotter{
opt: opt,
db: db,
refs: map[string]layer.Layer{},
reg: reg,
}
return s, nil
}
func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
origParent := parent
if parent != "" {
if l, err := s.getLayer(parent, false); err != nil {
return err
} else if l != nil {
parent, err = getGraphID(l)
if err != nil {
return err
}
} else {
parent, _ = s.getGraphDriverID(parent)
}
}
if err := s.opt.GraphDriver.Create(key, parent, nil); err != nil {
return err
}
if err := s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(key))
if err != nil {
return err
}
if err := b.Put(keyParent, []byte(origParent)); err != nil {
return err
}
return nil
}); err != nil {
return err
}
return nil
}
func (s *snapshotter) chainID(key string) (layer.ChainID, bool) {
if strings.HasPrefix(key, "sha256:") {
dgst, err := digest.Parse(key)
if err != nil {
return "", false
}
return layer.ChainID(dgst), true
}
return "", false
}
func (s *snapshotter) getLayer(key string, withCommitted bool) (layer.Layer, error) {
s.mu.Lock()
l, ok := s.refs[key]
if !ok {
id, ok := s.chainID(key)
if !ok {
if !withCommitted {
s.mu.Unlock()
return nil, nil
}
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(key))
if b == nil {
return nil
}
v := b.Get(keyChainID)
if v != nil {
id = layer.ChainID(v)
}
return nil
}); err != nil {
s.mu.Unlock()
return nil, err
}
if id == "" {
s.mu.Unlock()
return nil, nil
}
}
var err error
l, err = s.opt.LayerStore.Get(id)
if err != nil {
s.mu.Unlock()
return nil, err
}
s.refs[key] = l
if err := s.db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists([]byte(key))
return err
}); err != nil {
s.mu.Unlock()
return nil, err
}
}
s.mu.Unlock()
return l, nil
}
func (s *snapshotter) getGraphDriverID(key string) (string, bool) {
var gdID string
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(key))
if b == nil {
return errors.Errorf("not found") // TODO: typed
}
v := b.Get(keyCommitted)
if v != nil {
gdID = string(v)
}
return nil
}); err != nil || gdID == "" {
return key, false
}
return gdID, true
}
func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
inf := snapshots.Info{
Kind: snapshots.KindActive,
}
l, err := s.getLayer(key, false)
if err != nil {
return snapshots.Info{}, err
}
if l != nil {
if p := l.Parent(); p != nil {
inf.Parent = p.ChainID().String()
}
inf.Kind = snapshots.KindCommitted
inf.Name = key
return inf, nil
}
l, err = s.getLayer(key, true)
if err != nil {
return snapshots.Info{}, err
}
id, committed := s.getGraphDriverID(key)
if committed {
inf.Kind = snapshots.KindCommitted
}
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(id))
if b == nil && l == nil {
return errors.Errorf("snapshot %s not found", id) // TODO: typed
}
inf.Name = key
if b != nil {
v := b.Get(keyParent)
if v != nil {
inf.Parent = string(v)
return nil
}
}
if l != nil {
if p := l.Parent(); p != nil {
inf.Parent = p.ChainID().String()
}
inf.Kind = snapshots.KindCommitted
}
return nil
}); err != nil {
return snapshots.Info{}, err
}
return inf, nil
}
func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountable, error) {
l, err := s.getLayer(key, true)
if err != nil {
return nil, err
}
if l != nil {
id := identity.NewID()
rwlayer, err := s.opt.LayerStore.CreateRWLayer(id, l.ChainID(), nil)
if err != nil {
return nil, err
}
rootfs, err := rwlayer.Mount("")
if err != nil {
return nil, err
}
mnt := []mount.Mount{{
Source: rootfs.Path(),
Type: "bind",
Options: []string{"rbind"},
}}
return &constMountable{
mounts: mnt,
release: func() error {
_, err := s.opt.LayerStore.ReleaseRWLayer(rwlayer)
return err
},
}, nil
}
id, _ := s.getGraphDriverID(key)
rootfs, err := s.opt.GraphDriver.Get(id, "")
if err != nil {
return nil, err
}
mnt := []mount.Mount{{
Source: rootfs.Path(),
Type: "bind",
Options: []string{"rbind"},
}}
return &constMountable{
mounts: mnt,
release: func() error {
return s.opt.GraphDriver.Put(id)
},
}, nil
}
func (s *snapshotter) Remove(ctx context.Context, key string) error {
l, err := s.getLayer(key, true)
if err != nil {
return err
}
id, _ := s.getGraphDriverID(key)
var found bool
if err := s.db.Update(func(tx *bolt.Tx) error {
found = tx.Bucket([]byte(key)) != nil
if found {
tx.DeleteBucket([]byte(key))
if id != key {
tx.DeleteBucket([]byte(id))
}
}
return nil
}); err != nil {
return err
}
if l != nil {
s.mu.Lock()
delete(s.refs, key)
s.mu.Unlock()
_, err := s.opt.LayerStore.Release(l)
return err
}
if !found { // this happens when removing views
return nil
}
return s.opt.GraphDriver.Remove(id)
}
func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
return s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(name))
if err != nil {
return err
}
if err := b.Put(keyCommitted, []byte(key)); err != nil {
return err
}
return nil
})
}
func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (snapshot.Mountable, error) {
return s.Mounts(ctx, parent)
}
func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
return errors.Errorf("not-implemented")
}
func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
// not implemented
return s.Stat(ctx, info.Name)
}
func (s *snapshotter) Usage(ctx context.Context, key string) (us snapshots.Usage, retErr error) {
usage := snapshots.Usage{}
if l, err := s.getLayer(key, true); err != nil {
return usage, err
} else if l != nil {
s, err := l.DiffSize()
if err != nil {
return usage, err
}
usage.Size = s
return usage, nil
}
size := int64(-1)
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(key))
if b == nil {
return nil
}
v := b.Get(keySize)
if v != nil {
s, err := strconv.Atoi(string(v))
if err != nil {
return err
}
size = int64(s)
}
return nil
}); err != nil {
return usage, err
}
if size != -1 {
usage.Size = size
return usage, nil
}
id, _ := s.getGraphDriverID(key)
info, err := s.Stat(ctx, key)
if err != nil {
return usage, err
}
var parent string
if info.Parent != "" {
if l, err := s.getLayer(info.Parent, false); err != nil {
return usage, err
} else if l != nil {
parent, err = getGraphID(l)
if err != nil {
return usage, err
}
} else {
parent, _ = s.getGraphDriverID(info.Parent)
}
}
diffSize, err := s.opt.GraphDriver.DiffSize(id, parent)
if err != nil {
return usage, err
}
if err := s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(key))
if err != nil {
return err
}
return b.Put(keySize, []byte(strconv.Itoa(int(diffSize))))
}); err != nil {
return usage, err
}
usage.Size = diffSize
return usage, nil
}
func (s *snapshotter) Close() error {
return s.db.Close()
}
type constMountable struct {
mounts []mount.Mount
release func() error
}
func (m *constMountable) Mount() ([]mount.Mount, error) {
return m.mounts, nil
}
func (m *constMountable) Release() error {
if m.release == nil {
return nil
}
return m.release()
}
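
The snapshotter persists its bookkeeping in bolt: one bucket per snapshot key, with fixed field keys (parent, committed, chainid, size) stored as entries. A reduced sketch of that layout using the same boltdb package:

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/snapshots-demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	key := "layer-abc" // illustrative snapshot key
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(key))
		if err != nil {
			return err
		}
		// One field entry per property, as the snapshotter does.
		return b.Put([]byte("parent"), []byte("layer-root"))
	}); err != nil {
		log.Fatal(err)
	}

	var parent string
	db.View(func(tx *bolt.Tx) error {
		if b := tx.Bucket([]byte(key)); b != nil {
			parent = string(b.Get([]byte("parent")))
		}
		return nil
	})
	fmt.Println("parent:", parent)
}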

View file

@@ -0,0 +1,419 @@
package buildkit
import (
"context"
"encoding/json"
"io"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/pkg/jsonmessage"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/tracing"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
grpcmetadata "google.golang.org/grpc/metadata"
)
// Opt is the option struct required for creating the builder
type Opt struct {
SessionManager *session.Manager
Root string
Dist images.DistributionServices
}
// Builder can build using BuildKit backend
type Builder struct {
controller *control.Controller
reqBodyHandler *reqBodyHandler
mu sync.Mutex
jobs map[string]*buildJob
}
// New creates a new builder
func New(opt Opt) (*Builder, error) {
reqHandler := newReqBodyHandler(tracing.DefaultTransport)
c, err := newController(reqHandler, opt)
if err != nil {
return nil, err
}
b := &Builder{
controller: c,
reqBodyHandler: reqHandler,
jobs: map[string]*buildJob{},
}
return b, nil
}
// Cancel cancels a build using ID
func (b *Builder) Cancel(ctx context.Context, id string) error {
b.mu.Lock()
if j, ok := b.jobs[id]; ok && j.cancel != nil {
j.cancel()
}
b.mu.Unlock()
return nil
}
// DiskUsage returns a report about space used by build cache
func (b *Builder) DiskUsage(ctx context.Context) ([]*types.BuildCache, error) {
duResp, err := b.controller.DiskUsage(ctx, &controlapi.DiskUsageRequest{})
if err != nil {
return nil, err
}
var items []*types.BuildCache
for _, r := range duResp.Record {
items = append(items, &types.BuildCache{
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size: r.Size_,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
UsageCount: int(r.UsageCount),
Parent: r.Parent,
Description: r.Description,
})
}
return items, nil
}
// Prune clears all reclaimable build cache
func (b *Builder) Prune(ctx context.Context) (int64, error) {
ch := make(chan *controlapi.UsageRecord)
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
defer close(ch)
return b.controller.Prune(&controlapi.PruneRequest{}, &pruneProxy{
streamProxy: streamProxy{ctx: ctx},
ch: ch,
})
})
var size int64
eg.Go(func() error {
for r := range ch {
size += r.Size_
}
return nil
})
if err := eg.Wait(); err != nil {
return 0, err
}
return size, nil
}
// Build executes a build request
func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
var rc = opt.Source
if buildID := opt.Options.BuildID; buildID != "" {
b.mu.Lock()
upload := false
if strings.HasPrefix(buildID, "upload-request:") {
upload = true
buildID = strings.TrimPrefix(buildID, "upload-request:")
}
if _, ok := b.jobs[buildID]; !ok {
b.jobs[buildID] = newBuildJob()
}
j := b.jobs[buildID]
var cancel func()
ctx, cancel = context.WithCancel(ctx)
j.cancel = cancel
b.mu.Unlock()
if upload {
ctx2, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
err := j.SetUpload(ctx2, rc)
return nil, err
}
if remoteContext := opt.Options.RemoteContext; remoteContext == "upload-request" {
ctx2, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
var err error
rc, err = j.WaitUpload(ctx2)
if err != nil {
return nil, err
}
opt.Options.RemoteContext = ""
}
defer func() {
delete(b.jobs, buildID)
}()
}
var out builder.Result
id := identity.NewID()
frontendAttrs := map[string]string{}
if opt.Options.Target != "" {
frontendAttrs["target"] = opt.Options.Target
}
if opt.Options.Dockerfile != "" && opt.Options.Dockerfile != "." {
frontendAttrs["filename"] = opt.Options.Dockerfile
}
if opt.Options.RemoteContext != "" {
if opt.Options.RemoteContext != "client-session" {
frontendAttrs["context"] = opt.Options.RemoteContext
}
} else {
url, cancel := b.reqBodyHandler.newRequest(rc)
defer cancel()
frontendAttrs["context"] = url
}
cacheFrom := append([]string{}, opt.Options.CacheFrom...)
frontendAttrs["cache-from"] = strings.Join(cacheFrom, ",")
for k, v := range opt.Options.BuildArgs {
if v == nil {
continue
}
frontendAttrs["build-arg:"+k] = *v
}
for k, v := range opt.Options.Labels {
frontendAttrs["label:"+k] = v
}
if opt.Options.NoCache {
frontendAttrs["no-cache"] = ""
}
exporterAttrs := map[string]string{}
if len(opt.Options.Tags) > 0 {
exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
}
req := &controlapi.SolveRequest{
Ref: id,
Exporter: "moby",
ExporterAttrs: exporterAttrs,
Frontend: "dockerfile.v0",
FrontendAttrs: frontendAttrs,
Session: opt.Options.SessionID,
}
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
resp, err := b.controller.Solve(ctx, req)
if err != nil {
return err
}
id, ok := resp.ExporterResponse["containerimage.digest"]
if !ok {
return errors.Errorf("missing image id")
}
out.ImageID = id
return nil
})
ch := make(chan *controlapi.StatusResponse)
eg.Go(func() error {
defer close(ch)
return b.controller.Status(&controlapi.StatusRequest{
Ref: id,
}, &statusProxy{streamProxy: streamProxy{ctx: ctx}, ch: ch})
})
eg.Go(func() error {
for sr := range ch {
dt, err := sr.Marshal()
if err != nil {
return err
}
auxJSONBytes, err := json.Marshal(dt)
if err != nil {
return err
}
auxJSON := new(json.RawMessage)
*auxJSON = auxJSONBytes
msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: "moby.buildkit.trace", Aux: auxJSON})
if err != nil {
return err
}
msgJSON = append(msgJSON, []byte("\r\n")...)
n, err := opt.ProgressWriter.Output.Write(msgJSON)
if err != nil {
return err
}
if n != len(msgJSON) {
return io.ErrShortWrite
}
}
return nil
})
if err := eg.Wait(); err != nil {
return nil, err
}
return &out, nil
}
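
Build translates ImageBuildOptions into BuildKit frontend and exporter attribute maps before issuing the SolveRequest. A sketch of the translation for a few representative fields; the full mapping above also covers cache-from, build-args, labels, and no-cache:

package main

import (
	"fmt"
	"strings"
)

// frontendAttrsFor mirrors the option translation in Build for target,
// dockerfile and tags only; the names here are illustrative.
func frontendAttrsFor(target, dockerfile string, tags []string) (frontend, exporter map[string]string) {
	frontend = map[string]string{}
	if target != "" {
		frontend["target"] = target
	}
	if dockerfile != "" && dockerfile != "." {
		frontend["filename"] = dockerfile
	}
	exporter = map[string]string{}
	if len(tags) > 0 {
		exporter["name"] = strings.Join(tags, ",")
	}
	return frontend, exporter
}

func main() {
	f, e := frontendAttrsFor("release", "Dockerfile.prod", []string{"myapp:latest"})
	fmt.Println(f, e)
}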
type streamProxy struct {
ctx context.Context
}
func (sp *streamProxy) SetHeader(_ grpcmetadata.MD) error {
return nil
}
func (sp *streamProxy) SendHeader(_ grpcmetadata.MD) error {
return nil
}
func (sp *streamProxy) SetTrailer(_ grpcmetadata.MD) {
}
func (sp *streamProxy) Context() context.Context {
return sp.ctx
}
func (sp *streamProxy) RecvMsg(m interface{}) error {
return io.EOF
}
type statusProxy struct {
streamProxy
ch chan *controlapi.StatusResponse
}
func (sp *statusProxy) Send(resp *controlapi.StatusResponse) error {
return sp.SendMsg(resp)
}
func (sp *statusProxy) SendMsg(m interface{}) error {
if sr, ok := m.(*controlapi.StatusResponse); ok {
sp.ch <- sr
}
return nil
}
type pruneProxy struct {
streamProxy
ch chan *controlapi.UsageRecord
}
func (sp *pruneProxy) Send(resp *controlapi.UsageRecord) error {
return sp.SendMsg(resp)
}
func (sp *pruneProxy) SendMsg(m interface{}) error {
if sr, ok := m.(*controlapi.UsageRecord); ok {
sp.ch <- sr
}
return nil
}
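
statusProxy and pruneProxy implement the gRPC server-stream interfaces in-process, so controller.Status and controller.Prune, which only know how to Send to a stream, can push records into ordinary Go channels. The adapter idea, reduced to a self-contained sketch:

package main

import "fmt"

// sender is the shape of a gRPC server-stream Send method. The proxies
// above implement the real stream interfaces the same way: an
// in-process stub whose Send pushes into a channel instead of the wire.
type sender interface {
	Send(msg string) error
}

type chanStream struct{ ch chan<- string }

func (s chanStream) Send(msg string) error {
	s.ch <- msg
	return nil
}

// produce stands in for a push-style API like controller.Status.
func produce(s sender) error {
	for _, m := range []string{"step 1/3", "step 2/3", "step 3/3"} {
		if err := s.Send(m); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ch := make(chan string)
	go func() {
		defer close(ch)
		produce(chanStream{ch: ch})
	}()
	for m := range ch {
		fmt.Println(m)
	}
}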
type contentStoreNoLabels struct {
content.Store
}
func (c *contentStoreNoLabels) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
return content.Info{}, nil
}
type wrapRC struct {
io.ReadCloser
once sync.Once
err error
waitCh chan struct{}
}
func (w *wrapRC) Read(b []byte) (int, error) {
n, err := w.ReadCloser.Read(b)
if err != nil {
e := err
if e == io.EOF {
e = nil
}
w.close(e)
}
return n, err
}
func (w *wrapRC) Close() error {
err := w.ReadCloser.Close()
w.close(err)
return err
}
func (w *wrapRC) close(err error) {
w.once.Do(func() {
w.err = err
close(w.waitCh)
})
}
func (w *wrapRC) wait() error {
<-w.waitCh
return w.err
}
type buildJob struct {
cancel func()
waitCh chan func(io.ReadCloser) error
}
func newBuildJob() *buildJob {
return &buildJob{waitCh: make(chan func(io.ReadCloser) error)}
}
func (j *buildJob) WaitUpload(ctx context.Context) (io.ReadCloser, error) {
done := make(chan struct{})
var upload io.ReadCloser
fn := func(rc io.ReadCloser) error {
w := &wrapRC{ReadCloser: rc, waitCh: make(chan struct{})}
upload = w
close(done)
return w.wait()
}
select {
case <-ctx.Done():
return nil, ctx.Err()
case j.waitCh <- fn:
<-done
return upload, nil
}
}
func (j *buildJob) SetUpload(ctx context.Context, rc io.ReadCloser) error {
select {
case <-ctx.Done():
return ctx.Err()
case fn := <-j.waitCh:
return fn(rc)
}
}
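
buildJob is a rendezvous between the upload request and the build request: WaitUpload sends a callback over waitCh, SetUpload invokes it with the body, and both sides honor their caller's context. A reduced sketch of the handshake (it omits the wrapRC wait that keeps the uploader blocked until the body is fully consumed):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"
)

// rendezvous reduces the buildJob handshake to its core: the receiver
// sends a callback over a channel and the uploader invokes it.
type rendezvous struct {
	waitCh chan func(io.Reader)
}

func (r *rendezvous) waitUpload(ctx context.Context) (io.Reader, error) {
	done := make(chan struct{})
	var body io.Reader
	fn := func(rc io.Reader) {
		body = rc
		close(done)
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case r.waitCh <- fn:
		<-done
		return body, nil
	}
}

func (r *rendezvous) setUpload(ctx context.Context, body io.Reader) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case fn := <-r.waitCh:
		fn(body)
		return nil
	}
}

func main() {
	r := &rendezvous{waitCh: make(chan func(io.Reader))}
	go r.setUpload(context.Background(), strings.NewReader("build context"))

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	body, err := r.waitUpload(ctx)
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(body)
	fmt.Println(string(b)) // build context
}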

View file

@@ -0,0 +1,157 @@
package buildkit
import (
"net/http"
"os"
"path/filepath"
"github.com/containerd/containerd/content/local"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
mobyworker "github.com/docker/docker/builder/builder-next/worker"
"github.com/docker/docker/daemon/graphdriver"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/dockerfile"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/solver/boltdbcachestorage"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
)
func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
return nil, err
}
dist := opt.Dist
root := opt.Root
var driver graphdriver.Driver
if ls, ok := dist.LayerStore.(interface {
Driver() graphdriver.Driver
}); ok {
driver = ls.Driver()
} else {
return nil, errors.Errorf("could not access graphdriver")
}
sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
})
if err != nil {
return nil, err
}
store, err := local.NewStore(filepath.Join(root, "content"))
if err != nil {
return nil, err
}
store = &contentStoreNoLabels{store}
md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
if err != nil {
return nil, err
}
snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{
Content: store,
Snapshotter: sbase,
MetadataStore: md,
})
cm, err := cache.NewManager(cache.ManagerOpt{
Snapshotter: snapshotter,
MetadataStore: md,
})
if err != nil {
return nil, err
}
src, err := containerimage.NewSource(containerimage.SourceOpt{
SessionManager: opt.SessionManager,
CacheAccessor: cm,
ContentStore: store,
DownloadManager: dist.DownloadManager,
MetadataStore: dist.V2MetadataService,
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
})
if err != nil {
return nil, err
}
exec, err := newExecutor(root)
if err != nil {
return nil, err
}
differ, ok := sbase.(containerimageexp.Differ)
if !ok {
return nil, errors.Errorf("snapshotter doesn't support differ")
}
exp, err := containerimageexp.New(containerimageexp.Opt{
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
Differ: differ,
})
if err != nil {
return nil, err
}
cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
if err != nil {
return nil, err
}
frontends := map[string]frontend.Frontend{}
frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
frontends["gateway.v0"] = gateway.NewGatewayFrontend()
wopt := mobyworker.Opt{
ID: "moby",
SessionManager: opt.SessionManager,
MetadataStore: md,
ContentStore: store,
CacheManager: cm,
Snapshotter: snapshotter,
Executor: exec,
ImageSource: src,
DownloadManager: dist.DownloadManager,
V2MetadataService: dist.V2MetadataService,
Exporters: map[string]exporter.Exporter{
"moby": exp,
},
Transport: rt,
}
wc := &worker.Controller{}
w, err := mobyworker.NewWorker(wopt)
if err != nil {
return nil, err
}
wc.Add(w)
ci := remotecache.NewCacheImporter(remotecache.ImportOpt{
Worker: w,
SessionManager: opt.SessionManager,
})
return control.NewController(control.Opt{
SessionManager: opt.SessionManager,
WorkerController: wc,
Frontends: frontends,
CacheKeyStorage: cacheStorage,
// CacheExporter: ce,
CacheImporter: ci,
})
}

View file

@@ -0,0 +1,17 @@
// +build !windows
package buildkit
import (
"path/filepath"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/runcexecutor"
)
func newExecutor(root string) (executor.Executor, error) {
return runcexecutor.New(runcexecutor.Opt{
Root: filepath.Join(root, "executor"),
CommandCandidates: []string{"docker-runc", "runc"},
})
}

View file

@@ -0,0 +1,21 @@
package buildkit
import (
"context"
"errors"
"io"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
)
func newExecutor(_ string) (executor.Executor, error) {
return &winExecutor{}, nil
}
type winExecutor struct {
}
func (e *winExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
return errors.New("buildkit executor not implemented for windows")
}

View file

@@ -0,0 +1,146 @@
package containerimage
import (
"context"
"fmt"
"strings"
distref "github.com/docker/distribution/reference"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const (
keyImageName = "name"
exporterImageConfig = "containerimage.config"
)
// Differ can make a moby layer from a snapshot
type Differ interface {
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a struct for creating a new exporter
type Opt struct {
ImageStore image.Store
ReferenceStore reference.Store
Differ Differ
}
type imageExporter struct {
opt Opt
}
// New creates a new moby imagestore exporter
func New(opt Opt) (exporter.Exporter, error) {
im := &imageExporter{opt: opt}
return im, nil
}
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
i := &imageExporterInstance{imageExporter: e}
for k, v := range opt {
switch k {
case keyImageName:
for _, v := range strings.Split(v, ",") {
ref, err := distref.ParseNormalizedNamed(v)
if err != nil {
return nil, err
}
i.targetNames = append(i.targetNames, ref)
}
case exporterImageConfig:
i.config = []byte(v)
default:
logrus.Warnf("image exporter: unknown option %s", k)
}
}
return i, nil
}
type imageExporterInstance struct {
*imageExporter
targetNames []distref.Named
config []byte
}
func (e *imageExporterInstance) Name() string {
return "exporting to image"
}
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) (map[string]string, error) {
if config, ok := opt[exporterImageConfig]; ok {
e.config = config
}
config := e.config
var diffs []digest.Digest
if ref != nil {
layersDone := oneOffProgress(ctx, "exporting layers")
if err := ref.Finalize(ctx); err != nil {
return nil, err
}
diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
diffs = make([]digest.Digest, len(diffIDs))
for i := range diffIDs {
diffs[i] = digest.Digest(diffIDs[i])
}
layersDone(nil)
}
if len(config) == 0 {
var err error
config, err = emptyImageConfig()
if err != nil {
return nil, err
}
}
history, err := parseHistoryFromConfig(config)
if err != nil {
return nil, err
}
diffs, history = normalizeLayersAndHistory(diffs, history, ref)
config, err = patchImageConfig(config, diffs, history)
if err != nil {
return nil, err
}
configDigest := digest.FromBytes(config)
configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest))
id, err := e.opt.ImageStore.Create(config)
if err != nil {
return nil, configDone(err)
}
configDone(nil)
if e.opt.ReferenceStore != nil {
for _, targetName := range e.targetNames {
tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
return nil, tagDone(err)
}
tagDone(nil)
}
}
return map[string]string{
"containerimage.digest": id.String(),
}, nil
}

View file

@@ -0,0 +1,177 @@
package containerimage
import (
"context"
"encoding/json"
"runtime"
"time"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// const (
// emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1")
// )
func emptyImageConfig() ([]byte, error) {
img := ocispec.Image{
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
}
img.RootFS.Type = "layers"
img.Config.WorkingDir = "/"
img.Config.Env = []string{"PATH=" + system.DefaultPathEnv}
dt, err := json.Marshal(img)
return dt, errors.Wrap(err, "failed to create empty image config")
}
func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
var config struct {
History []ocispec.History
}
if err := json.Unmarshal(dt, &config); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal history from config")
}
return config.History, nil
}
func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History) ([]byte, error) {
m := map[string]json.RawMessage{}
if err := json.Unmarshal(dt, &m); err != nil {
return nil, errors.Wrap(err, "failed to parse image config for patch")
}
var rootFS ocispec.RootFS
rootFS.Type = "layers"
rootFS.DiffIDs = append(rootFS.DiffIDs, dps...)
dt, err := json.Marshal(rootFS)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal rootfs")
}
m["rootfs"] = dt
dt, err = json.Marshal(history)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal history")
}
m["history"] = dt
if _, ok := m["created"]; !ok {
var tm *time.Time
for _, h := range history {
if h.Created != nil {
tm = h.Created
}
}
dt, err = json.Marshal(&tm)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal creation time")
}
m["created"] = dt
}
dt, err = json.Marshal(m)
return dt, errors.Wrap(err, "failed to marshal config after patch")
}
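The map[string]json.RawMessage round-trip above patches individual fields while leaving any fields it does not understand untouched. A minimal standalone sketch of the same pattern (field names here are illustrative, not part of this file):
package main
import (
	"encoding/json"
	"fmt"
)
func main() {
	// Only "rootfs" is replaced; the unrelated "author" field survives the round-trip.
	src := []byte(`{"author":"test","rootfs":{"type":"layers","diff_ids":[]}}`)
	m := map[string]json.RawMessage{}
	if err := json.Unmarshal(src, &m); err != nil {
		panic(err)
	}
	rootfs, _ := json.Marshal(map[string]interface{}{
		"type":     "layers",
		"diff_ids": []string{"sha256:aaa"},
	})
	m["rootfs"] = rootfs
	out, _ := json.Marshal(m)
	fmt.Println(string(out))
}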
func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History, ref cache.ImmutableRef) ([]digest.Digest, []ocispec.History) {
refMeta := getRefMetadata(ref, len(diffs))
var historyLayers int
for _, h := range history {
if !h.EmptyLayer {
historyLayers++
}
}
if historyLayers > len(diffs) {
// This shouldn't happen, but if it does, walk the history from the
// bottom and force the unaccounted entries to be empty layers.
logrus.Warn("invalid image config with unaccounted layers")
historyCopy := make([]ocispec.History, 0, len(history))
var l int
for _, h := range history {
if l >= len(diffs) {
h.EmptyLayer = true
}
if !h.EmptyLayer {
l++
}
historyCopy = append(historyCopy, h)
}
history = historyCopy
}
if len(diffs) > historyLayers {
// Some history items are missing; add them based on the ref metadata.
for _, md := range refMeta[historyLayers:] {
history = append(history, ocispec.History{
Created: &md.createdAt,
CreatedBy: md.description,
Comment: "buildkit.exporter.image.v0",
})
}
}
var layerIndex int
for i, h := range history {
if !h.EmptyLayer {
if h.Created == nil {
h.Created = &refMeta[layerIndex].createdAt
}
layerIndex++
}
history[i] = h
}
return diffs, history
}
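A hypothetical trace of the reconciliation above, written against the unexported function (so it only compiles alongside this file); passing a nil ref makes getRefMetadata fall back to its synthetic defaults:
diffs := []digest.Digest{"sha256:aaa", "sha256:bbb"}
history := []ocispec.History{
	{CreatedBy: "FROM scratch"},              // consumes sha256:aaa
	{CreatedBy: "ENV X=1", EmptyLayer: true}, // no layer
}
// One diff (sha256:bbb) has no matching history entry, so a synthetic
// "buildkit.exporter.image.v0" entry is appended from the ref metadata.
diffs, history = normalizeLayersAndHistory(diffs, history, nil)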
type refMetadata struct {
description string
createdAt time.Time
}
func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
if limit <= 0 {
return nil
}
meta := refMetadata{
description: "created by buildkit", // shouldn't be shown but don't fail build
createdAt: time.Now(),
}
if ref == nil {
return append(getRefMetadata(nil, limit-1), meta)
}
if descr := cache.GetDescription(ref.Metadata()); descr != "" {
meta.description = descr
}
meta.createdAt = cache.GetCreatedAt(ref.Metadata())
p := ref.Parent()
if p != nil {
defer p.Release(context.TODO())
}
return append(getRefMetadata(p, limit-1), meta)
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}

View file

@ -0,0 +1,67 @@
package buildkit
import (
"io"
"net/http"
"strings"
"sync"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
)
const urlPrefix = "build-context-"
type reqBodyHandler struct {
mu sync.Mutex
rt http.RoundTripper
requests map[string]io.ReadCloser
}
func newReqBodyHandler(rt http.RoundTripper) *reqBodyHandler {
return &reqBodyHandler{
rt: rt,
requests: map[string]io.ReadCloser{},
}
}
func (h *reqBodyHandler) newRequest(rc io.ReadCloser) (string, func()) {
id := identity.NewID()
h.mu.Lock()
h.requests[id] = rc
h.mu.Unlock()
return "http://" + urlPrefix + id, func() {
h.mu.Lock()
delete(h.requests, id)
h.mu.Unlock()
}
}
func (h *reqBodyHandler) RoundTrip(req *http.Request) (*http.Response, error) {
host := req.URL.Host
if strings.HasPrefix(host, urlPrefix) {
if req.Method != "GET" {
return nil, errors.Errorf("invalid request")
}
id := strings.TrimPrefix(host, urlPrefix)
h.mu.Lock()
rc, ok := h.requests[id]
delete(h.requests, id)
h.mu.Unlock()
if !ok {
return nil, errors.Errorf("context not found")
}
resp := &http.Response{
Status: "200 OK",
StatusCode: 200,
Body: rc,
ContentLength: -1,
}
return resp, nil
}
return h.rt.RoundTrip(req)
}
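A hedged usage sketch, written as if inside this package since both helpers are unexported: register a context stream, and any client built on the handler then serves that body for the synthetic URL. The payload and wiring are illustrative (the real caller is the buildkit builder), and fmt plus io/ioutil are assumed in addition to this file's imports:
h := newReqBodyHandler(http.DefaultTransport)
url, release := h.newRequest(ioutil.NopCloser(strings.NewReader("build context payload")))
defer release()
client := &http.Client{Transport: h}
resp, err := client.Get(url) // answered from the registered body, never hits the network
if err != nil {
	panic(err)
}
defer resp.Body.Close()
data, _ := ioutil.ReadAll(resp.Body)
fmt.Println(string(data)) // build context payload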

View file

@ -0,0 +1,321 @@
package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
SessionManager *session.Manager
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource source.Source
Exporters map[string]exporter.Exporter
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err != nil {
return nil, err
}
sm.Register(gs)
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err != nil {
return nil, err
}
sm.Register(hs)
ss, err := local.NewSource(local.Opt{
SessionManager: opt.SessionManager,
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err != nil {
return nil, err
}
sm.Register(ss)
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns the worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns a map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(id string) (cache.ImmutableRef, error) {
return w.CacheManager.Get(context.TODO(), id)
}
// ResolveOp converts an LLB vertex into an LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) {
switch op := v.Sys().(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, w.SourceManager, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, w.CacheManager, w.MetadataStore, w.Executor, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
default:
return nil, errors.Errorf("could not resolve %v", v)
}
}
// ResolveImageConfig returns the image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
// ImageSource is typically source/containerimage
resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
if !ok {
return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
}
return resolveImageConfig.ResolveImageConfig(ctx, ref)
}
// Exec executes a process directly on a worker
func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
active, err := w.CacheManager.New(ctx, rootFS)
if err != nil {
return err
}
defer active.Release(context.TODO())
return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr)
}
// DiskUsage returns a disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager.DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo) error {
return w.CacheManager.Prune(ctx, ch)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string) (exporter.Exporter, error) {
exp, ok := w.Exporters[name]
if !ok {
return nil, errors.Errorf("exporter %q could not be found", name)
}
return exp, nil
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
return nil, errors.Errorf("getremote not implemented")
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore.Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
if err != nil {
return nil, err
}
return ref, nil
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
if err := contentutil.Copy(ctx, ld.w.ContentStore, ld.provider, ld.desc); err != nil {
return nil, 0, done(err)
}
done(nil)
ra, err := ld.w.ContentStore.ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}
type resolveImageConfig interface {
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
}

21
client/build_cancel.go Normal file
View file

@ -0,0 +1,21 @@
package client // import "github.com/docker/docker/client"
import (
"net/url"
"golang.org/x/net/context"
)
// BuildCancel requests the daemon to cancel the ongoing build request with the given id
func (cli *Client) BuildCancel(ctx context.Context, id string) error {
query := url.Values{}
query.Set("id", id)
serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
if err != nil {
return err
}
defer ensureReaderClosed(serverResp)
return nil
}
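A minimal client-side sketch, assuming the build was started with a caller-chosen BuildID (the id value here is illustrative):
cli, err := client.NewEnvClient()
if err != nil {
	panic(err)
}
// The same id must have been sent as options.BuildID on the ImageBuild call.
if err := cli.BuildCancel(context.Background(), "my-build-id"); err != nil {
	panic(err)
}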

View file

@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))
}
if options.BuildID != "" {
query.Set("buildid", options.BuildID)
}
query.Set("version", string(options.Version))
return query, nil
}

View file

@ -86,6 +86,7 @@ type DistributionAPIClient interface {
type ImageAPIClient interface {
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)

View file

@ -27,6 +27,7 @@ import (
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/cli/debug"
@ -238,7 +239,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
buildCache *fscache.FSCache
buildCache *fscache.FSCache // legacy
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
@ -270,7 +272,16 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
return opts, err
}
bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache)
buildkit, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: daemon.DistributionServices(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache, buildkit)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
@ -279,6 +290,7 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
sessionManager: sm,
buildBackend: bb,
buildCache: buildCache,
buildkit: buildkit,
daemon: daemon,
}, nil
}
@ -452,7 +464,7 @@ func initRouter(opts routerOptions) {
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon),
sessionrouter.NewRouter(opts.sessionManager),

View file

@ -922,6 +922,11 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
return d, nil
}
// DistributionServices returns services controlling daemon storage
func (daemon *Daemon) DistributionServices() images.DistributionServices {
return daemon.imageService.DistributionServices()
}
func (daemon *Daemon) waitForStartupDone() {
<-daemon.startupDone
}

View file

@ -3,9 +3,11 @@ package images // import "github.com/docker/docker/daemon/images"
import (
"context"
"os"
"runtime"
"github.com/docker/docker/container"
daemonevents "github.com/docker/docker/daemon/events"
"github.com/docker/docker/distribution"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
@ -74,6 +76,26 @@ type ImageService struct {
uploadManager *xfer.LayerUploadManager
}
// DistributionServices provides daemon image storage services
type DistributionServices struct {
DownloadManager distribution.RootFSDownloadManager
V2MetadataService metadata.V2MetadataService
LayerStore layer.Store // TODO: lcow
ImageStore image.Store
ReferenceStore dockerreference.Store
}
// DistributionServices returns services controlling daemon image storage
func (i *ImageService) DistributionServices() DistributionServices {
return DistributionServices{
DownloadManager: i.downloadManager,
V2MetadataService: metadata.NewV2MetadataService(i.distributionMetadataStore),
LayerStore: i.layerStores[runtime.GOOS],
ImageStore: i.imageStore,
ReferenceStore: i.referenceStore,
}
}
// CountImages returns the number of images stored by ImageService
// called from info.go
func (i *ImageService) CountImages() int {

View file

@ -84,6 +84,7 @@ test_env() {
env -i \
DEST="$ABS_DEST" \
DOCKER_API_VERSION="$DOCKER_API_VERSION" \
DOCKER_BUILDKIT="$DOCKER_BUILDKIT" \
DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \

View file

@ -2534,7 +2534,7 @@ func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
build.WithFile(".dockerignore", "!\n"),
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "error checking context: 'illegal exclusion pattern: \"!\"",
Err: `illegal exclusion pattern: "!"`,
})
}

View file

@ -126,8 +126,12 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) {
// * Run a 1-year-long sleep from a docker build.
// * When docker events sees container start, close the "docker build" command
// * Wait for docker events to emit a dying event.
//
// TODO(buildkit): this test needs to be rewritten for buildkit.
// The behavior has been verified manually. Confirmed issue: docker build output parsing.
// Potential issue: newEventObserver uses docker events, which is not hooked up to buildkit.
func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) {
testRequires(c, DaemonIsLinux)
testRequires(c, DaemonIsLinux, TODOBuildkit)
name := "testbuildcancellation"
observer, err := newEventObserver(c)

View file

@ -90,7 +90,7 @@ func (s *DockerSuite) TestHealth(c *check.C) {
buildImageSuccessfully(c, "no_healthcheck", build.WithDockerfile(`FROM testhealth
HEALTHCHECK NONE`))
out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck")
out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "no_healthcheck")
c.Check(out, checker.Equals, "[NONE]\n")
// Enable the checks from the CLI

View file

@ -208,6 +208,10 @@ func SwarmInactive() bool {
return testEnv.DaemonInfo.Swarm.LocalNodeState == swarm.LocalNodeStateInactive
}
func TODOBuildkit() bool {
return os.Getenv("DOCKER_BUILDKIT") == ""
}
// testRequires checks if the environment satisfies the requirements
// for the test to run or skips the tests.
func testRequires(c requirement.SkipT, requirements ...requirement.Test) {

View file

@ -3,7 +3,6 @@ package layer // import "github.com/docker/docker/layer"
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -16,6 +15,7 @@ import (
"github.com/docker/distribution"
"github.com/docker/docker/pkg/ioutils"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -194,8 +194,8 @@ func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid cache id value")
if content == "" {
return "", errors.Errorf("invalid cache id value")
}
return content, nil

View file

@ -121,6 +121,10 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string)
return ls, nil
}
func (ls *layerStore) Driver() graphdriver.Driver {
return ls.driver
}
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
cl, ok := ls.layerMap[layer]
if ok {

View file

@ -54,6 +54,10 @@ func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID)
}
func (rl *roLayer) CacheID() string {
return rl.cacheID
}
func (rl *roLayer) ChainID() ChainID {
return rl.chainID
}

View file

@ -27,10 +27,13 @@ github.com/imdario/mergo v0.3.5
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
# buildkit
github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
#get libnetwork packages
@ -72,8 +75,8 @@ github.com/pborman/uuid v1.0
google.golang.org/grpc v1.12.0
# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal
github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
github.com/opencontainers/runtime-spec v1.0.1
github.com/opencontainers/image-spec v1.0.1
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0

View file

@ -0,0 +1,11 @@
# contrib
The `contrib` directory contains packages that do not belong in the core containerd packages but still contribute to overall containerd usability.
Packages such as AppArmor or SELinux are placed in `contrib` because they are platform-dependent and often require higher-level tools and profiles to work.
Packaging and other build tools can be added to `contrib` to aid in packaging containerd for various distributions.
## Testing
Code in the `contrib` directory may or may not have been tested in the normal test pipeline for core components.

View file

@ -0,0 +1,56 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package seccomp
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
"github.com/opencontainers/runtime-spec/specs-go"
)
// WithProfile receives the name of a file stored on disk containing a JSON-
// formatted seccomp profile, as specified by the opencontainers/runtime-spec.
// The profile is read from the file, unmarshaled, and set on the spec.
func WithProfile(profile string) oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = &specs.LinuxSeccomp{}
f, err := ioutil.ReadFile(profile)
if err != nil {
return fmt.Errorf("Cannot load seccomp profile %q: %v", profile, err)
}
if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil {
return fmt.Errorf("Decoding seccomp profile failed %q: %v", profile, err)
}
return nil
}
}
// WithDefaultProfile sets the default seccomp profile on the spec.
// Note: this must follow the setting of process capabilities
func WithDefaultProfile() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = DefaultProfile(s)
return nil
}
}
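A hedged sketch of applying these options while generating a containerd OCI spec; oci.GenerateSpec is the upstream helper assumed here, and per the note above WithDefaultProfile comes after capability-related setup:
// assumed imports: "context", "github.com/containerd/containerd/containers",
// "github.com/containerd/containerd/oci", specs "github.com/opencontainers/runtime-spec/specs-go"
func specWithSeccomp(ctx context.Context, client oci.Client, c *containers.Container) (*specs.Spec, error) {
	// The profile is computed from the spec's capabilities, so
	// WithDefaultProfile is appended last in the opts chain.
	return oci.GenerateSpec(ctx, client, c, seccomp.WithDefaultProfile())
}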

View file

@ -0,0 +1,581 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package seccomp
import (
"runtime"
"syscall"
"github.com/opencontainers/runtime-spec/specs-go"
)
func arches() []specs.Arch {
switch runtime.GOARCH {
case "amd64":
return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32}
case "arm64":
return []specs.Arch{specs.ArchARM, specs.ArchAARCH64}
case "mips64":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mips64n32":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mipsel64":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "mipsel64n32":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "s390x":
return []specs.Arch{specs.ArchS390, specs.ArchS390X}
default:
return []specs.Arch{}
}
}
// DefaultProfile defines the whitelist for the default seccomp profile.
func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
syscalls := []specs.LinuxSyscall{
{
Names: []string{
"accept",
"accept4",
"access",
"alarm",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"_newselect",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0008,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0xffffffff,
Op: specs.OpEqualTo,
},
},
},
}
s := &specs.LinuxSeccomp{
DefaultAction: specs.ActErrno,
Architectures: arches(),
Syscalls: syscalls,
}
// include by arch
switch runtime.GOARCH {
case "arm", "arm64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arm_fadvise64_64",
"arm_sync_file_range",
"breakpoint",
"cacheflush",
"set_tls",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "amd64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arch_prctl",
"modify_ldt",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "386":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"modify_ldt",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"s390_pci_mmio_read",
"s390_pci_mmio_write",
"s390_runtime_instr",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
admin := false
for _, c := range sp.Process.Capabilities.Bounding {
switch c {
case "CAP_DAC_READ_SEARCH":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"open_by_handle_at"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_ADMIN":
admin = true
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"bpf",
"clone",
"fanotify_init",
"lookup_dcookie",
"mount",
"name_to_handle_at",
"perf_event_open",
"setdomainname",
"sethostname",
"setns",
"umount",
"umount2",
"unshare",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_BOOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"reboot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_CHROOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"chroot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_MODULE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"delete_module",
"init_module",
"finit_module",
"query_module",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PACCT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"acct"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PTRACE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"kcmp",
"process_vm_readv",
"process_vm_writev",
"ptrace",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_RAWIO":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"iopl",
"ioperm",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TIME":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"settimeofday",
"stime",
"adjtimex",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TTY_CONFIG":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"vhangup"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
}
if !admin {
switch runtime.GOARCH {
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 1,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
default:
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
}
}
return s
}

202
vendor/github.com/google/shlex/COPYING generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

2
vendor/github.com/google/shlex/README generated vendored Normal file
View file

@ -0,0 +1,2 @@
go-shlex is a simple lexer for go that supports shell-style quoting,
commenting, and escaping.

417
vendor/github.com/google/shlex/shlex.go generated vendored Normal file
View file

@ -0,0 +1,417 @@
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package shlex implements a simple lexer which splits input into tokens using
shell-style rules for quoting and commenting.
The basic use case uses the default ASCII lexer to split a string into sub-strings:
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
To process a stream of strings:
l := NewLexer(os.Stdin)
for token, err := l.Next(); err == nil; token, err = l.Next() {
// process token
}
To access the raw token stream (which includes tokens for comments):
t := NewTokenizer(os.Stdin)
for token, err := t.Next(); err == nil; token, err = t.Next() {
// process token
}
*/
package shlex
import (
"bufio"
"fmt"
"io"
"strings"
)
// TokenType is a top-level token classification: A word, space, comment, unknown.
type TokenType int
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
type runeTokenClass int
// the internal state used by the lexer state machine
type lexerState int
// Token is a (type, value) pair representing a lexical token.
type Token struct {
tokenType TokenType
value string
}
// Equal reports whether tokens a and b are equal.
// Two tokens are equal if both their types and values are equal. A nil token can
// never be equal to another token.
func (a *Token) Equal(b *Token) bool {
if a == nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
// Named classes of UTF-8 runes
const (
spaceRunes = " \t\r\n"
escapingQuoteRunes = `"`
nonEscapingQuoteRunes = "'"
escapeRunes = `\`
commentRunes = "#"
)
// Classes of rune token
const (
unknownRuneClass runeTokenClass = iota
spaceRuneClass
escapingQuoteRuneClass
nonEscapingQuoteRuneClass
escapeRuneClass
commentRuneClass
eofRuneClass
)
// Classes of lexical token
const (
UnknownToken TokenType = iota
WordToken
SpaceToken
CommentToken
)
// Lexer state machine states
const (
startState lexerState = iota // no runes have been seen
inWordState // processing regular runes in a word
escapingState // we have just consumed an escape rune; the next rune is literal
escapingQuotedState // we have just consumed an escape rune within a quoted string
quotingEscapingState // we are within a quoted string that supports escaping ("...")
quotingState // we are within a string that does not support escaping ('...')
commentState // we are within a comment (everything following an unquoted or unescaped #)
)
// tokenClassifier is used for classifying rune characters.
type tokenClassifier map[rune]runeTokenClass
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
for _, runeChar := range runes {
typeMap[runeChar] = tokenType
}
}
// newDefaultClassifier creates a new classifier for ASCII characters.
func newDefaultClassifier() tokenClassifier {
t := tokenClassifier{}
t.addRuneClass(spaceRunes, spaceRuneClass)
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
t.addRuneClass(escapeRunes, escapeRuneClass)
t.addRuneClass(commentRunes, commentRuneClass)
return t
}
// ClassifyRune classifies a rune
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
return t[runeVal]
}
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
type Lexer Tokenizer
// NewLexer creates a new lexer from an input stream.
func NewLexer(r io.Reader) *Lexer {
return (*Lexer)(NewTokenizer(r))
}
// Next returns the next word, or an error. If there are no more words,
// the error will be io.EOF.
func (l *Lexer) Next() (string, error) {
for {
token, err := (*Tokenizer)(l).Next()
if err != nil {
return "", err
}
switch token.tokenType {
case WordToken:
return token.value, nil
case CommentToken:
// skip comments
default:
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
}
}
}
// Tokenizer turns an input stream into a sequence of typed tokens
type Tokenizer struct {
input bufio.Reader
classifier tokenClassifier
}
// NewTokenizer creates a new tokenizer from an input stream.
func NewTokenizer(r io.Reader) *Tokenizer {
input := bufio.NewReader(r)
classifier := newDefaultClassifier()
return &Tokenizer{
input: *input,
classifier: classifier}
}
// scanStream scans the stream for the next token using the internal state machine.
// It will panic if it encounters a rune which it does not know how to handle.
func (t *Tokenizer) scanStream() (*Token, error) {
state := startState
var tokenType TokenType
var value []rune
var nextRune rune
var nextRuneType runeTokenClass
var err error
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err == io.EOF {
nextRuneType = eofRuneClass
err = nil
} else if err != nil {
return nil, err
}
switch state {
case startState: // no runes read yet
{
switch nextRuneType {
case eofRuneClass:
{
return nil, io.EOF
}
case spaceRuneClass:
{
}
case escapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingState
}
case escapeRuneClass:
{
tokenType = WordToken
state = escapingState
}
case commentRuneClass:
{
tokenType = CommentToken
state = commentState
}
default:
{
tokenType = WordToken
value = append(value, nextRune)
state = inWordState
}
}
}
case inWordState: // in a regular word
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
t.input.UnreadRune()
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
state = quotingState
}
case escapeRuneClass:
{
state = escapingState
}
default:
{
value = append(value, nextRune)
}
}
}
case escapingState: // the rune after an escape character
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = inWordState
value = append(value, nextRune)
}
}
}
case escapingQuotedState: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = quotingEscapingState
value = append(value, nextRune)
}
}
}
case quotingEscapingState: // in escaping double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = inWordState
}
case escapeRuneClass:
{
state = escapingQuotedState
}
default:
{
value = append(value, nextRune)
}
}
}
case quotingState: // in non-escaping single quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case nonEscapingQuoteRuneClass:
{
state = inWordState
}
default:
{
value = append(value, nextRune)
}
}
}
case commentState: // in a comment
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
if nextRune == '\n' {
state = startState
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
} else {
value = append(value, nextRune)
}
}
default:
{
value = append(value, nextRune)
}
}
}
default:
{
return nil, fmt.Errorf("Unexpected state: %v", state)
}
}
}
}
// Next returns the next token in the stream.
func (t *Tokenizer) Next() (*Token, error) {
return t.scanStream()
}
// Split partitions a string into a slice of strings.
func Split(s string) ([]string, error) {
l := NewLexer(strings.NewReader(s))
subStrings := make([]string, 0)
for {
word, err := l.Next()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
}
}
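For reference, a runnable sketch of the Split entry point; comments are skipped by the lexer, so the trailing fragment drops out:
package main
import (
	"fmt"
	"github.com/google/shlex"
)
func main() {
	words, err := shlex.Split(`cp "my file.txt" /tmp # staged copy`)
	if err != nil {
		panic(err)
	}
	fmt.Println(words) // [cp my file.txt /tmp]
}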

View file

@ -2,6 +2,7 @@ package iradix
import (
"bytes"
"strings"
"github.com/hashicorp/golang-lru/simplelru"
)
@ -11,7 +12,9 @@ const (
// cache used per transaction. This is used to cache the updates
// to the nodes near the root, while the leaves do not need to be
// cached. This is important for very large transactions to prevent
// the modified cache from growing to be enormous.
// the modified cache from growing to be enormous. This is also used
// to set the max size of the mutation notify maps since those should
// also be bounded in a similar way.
defaultModifiedCache = 8192
)
@ -27,7 +30,11 @@ type Tree struct {
// New returns an empty Tree
func New() *Tree {
t := &Tree{root: &Node{}}
t := &Tree{
root: &Node{
mutateCh: make(chan struct{}),
},
}
return t
}
@ -40,75 +47,208 @@ func (t *Tree) Len() int {
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
root *Node
size int
modified *simplelru.LRU
// root is the modified root for the transaction.
root *Node
// snap is a snapshot of the root node for use if we have to run the
// slow notify algorithm.
snap *Node
// size tracks the size of the tree as it is modified during the
// transaction.
size int
// writable is a cache of writable nodes that have been created during
// the course of the transaction. This allows us to re-use the same
// nodes for further writes and avoid unnecessary copies of nodes that
// have never been exposed outside the transaction. This will only hold
// up to defaultModifiedCache number of entries.
writable *simplelru.LRU
// trackChannels is used to hold channels that need to be notified to
// signal mutation of the tree. This will only hold up to
// defaultModifiedCache number of entries, after which we will set the
// trackOverflow flag, which will cause us to use a more expensive
// algorithm to perform the notifications. Mutation tracking is only
// performed if trackMutate is true.
trackChannels map[chan struct{}]struct{}
trackOverflow bool
trackMutate bool
}
// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
txn := &Txn{
root: t.root,
snap: t.root,
size: t.size,
}
return txn
}
// writeNode returns a node to be modified, if the current
// node has already been modified during the course of
// the transaction, it is used in-place.
func (t *Txn) writeNode(n *Node) *Node {
// Ensure the modified set exists
if t.modified == nil {
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
t.trackMutate = track
}
// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
// In overflow, make sure we don't store any more objects.
if t.trackOverflow {
return
}
// If this would overflow the state we reject it and set the flag (since
// we aren't tracking everything that's required any longer).
if len(t.trackChannels) >= defaultModifiedCache {
// Mark that we are in the overflow state
t.trackOverflow = true
// Clear the map so that the channels can be garbage collected. It is
// safe to do this since we have already overflowed and will be using
// the slow notify algorithm.
t.trackChannels = nil
return
}
// Create the map on the fly when we need it.
if t.trackChannels == nil {
t.trackChannels = make(map[chan struct{}]struct{})
}
// Otherwise we are good to track it.
t.trackChannels[ch] = struct{}{}
}
// writeNode returns a node to be modified, if the current node has already been
// modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
// Ensure the writable set exists.
if t.writable == nil {
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
if err != nil {
panic(err)
}
t.modified = lru
t.writable = lru
}
// If this node has already been modified, we can
// continue to use it during this transaction.
if _, ok := t.modified.Get(n); ok {
// If this node has already been modified, we can continue to use it
// during this transaction. We know that we don't need to track it for
// a node update since the node is writable, but if this is for a leaf
// update we track it, in case the initial write to this node didn't
// update the leaf.
if _, ok := t.writable.Get(n); ok {
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
return n
}
// Copy the existing node
nc := new(Node)
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Copy the existing node. If you have set forLeafUpdate it will be
// safe to replace this leaf with another after you get your node for
// writing. You MUST replace it, because the channel associated with
// this leaf will be closed when this transaction is committed.
nc := &Node{
mutateCh: make(chan struct{}),
leaf: n.leaf,
}
if n.prefix != nil {
nc.prefix = make([]byte, len(n.prefix))
copy(nc.prefix, n.prefix)
}
if n.leaf != nil {
nc.leaf = new(leafNode)
*nc.leaf = *n.leaf
}
if len(n.edges) != 0 {
nc.edges = make([]edge, len(n.edges))
copy(nc.edges, n.edges)
}
// Mark this node as modified
t.modified.Add(n, nil)
// Mark this node as writable.
t.writable.Add(nc, nil)
return nc
}
// trackChannelsAndCount visits all the nodes in the tree under n, adding
// their mutate channels to the transaction. It returns the number of leaves
// in the visited subtree.
func (t *Txn) trackChannelsAndCount(n *Node) int {
// Count only leaf nodes
leaves := 0
if n.leaf != nil {
leaves = 1
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Recurse on the children
for _, e := range n.edges {
leaves += t.trackChannelsAndCount(e.node)
}
return leaves
}
// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
// Mark the child node as being mutated since we are about to abandon
// it. We don't need to mark the leaf since we are retaining it if it
// is there.
e := n.edges[0]
child := e.node
if t.trackMutate {
t.trackChannel(child.mutateCh)
}
// Merge the nodes.
n.prefix = concat(n.prefix, child.prefix)
n.leaf = child.leaf
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
// Handle key exhaution
// Handle key exhaustion
if len(search) == 0 {
nc := t.writeNode(n)
var oldVal interface{}
didUpdate := false
if n.isLeaf() {
old := nc.leaf.val
nc.leaf.val = v
return nc, old, true
} else {
nc.leaf = &leafNode{
key: k,
val: v,
}
return nc, nil, false
oldVal = n.leaf.val
didUpdate = true
}
nc := t.writeNode(n, true)
nc.leaf = &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
return nc, oldVal, didUpdate
}
// Look for the edge
@ -119,14 +259,16 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
e := edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: &leafNode{
key: k,
val: v,
mutateCh: make(chan struct{}),
key: k,
val: v,
},
prefix: search,
},
}
nc := t.writeNode(n)
nc := t.writeNode(n, false)
nc.addEdge(e)
return nc, nil, false
}
@ -137,7 +279,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
search = search[commonPrefix:]
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
if newChild != nil {
nc := t.writeNode(n)
nc := t.writeNode(n, false)
nc.edges[idx].node = newChild
return nc, oldVal, didUpdate
}
@ -145,9 +287,10 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
}
// Split the node
nc := t.writeNode(n)
nc := t.writeNode(n, false)
splitNode := &Node{
prefix: search[:commonPrefix],
mutateCh: make(chan struct{}),
prefix: search[:commonPrefix],
}
nc.replaceEdge(edge{
label: search[0],
@ -155,7 +298,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
})
// Restore the existing child node
modChild := t.writeNode(child)
modChild := t.writeNode(child, false)
splitNode.addEdge(edge{
label: modChild.prefix[commonPrefix],
node: modChild,
@ -164,8 +307,9 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
// Create a new leaf node
leaf := &leafNode{
key: k,
val: v,
mutateCh: make(chan struct{}),
key: k,
val: v,
}
// If the new key is a subset, add it to this node
@ -179,8 +323,9 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
splitNode.addEdge(edge{
label: search[0],
node: &Node{
leaf: leaf,
prefix: search,
mutateCh: make(chan struct{}),
leaf: leaf,
prefix: search,
},
})
return nc, nil, false
@ -188,19 +333,19 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
// Check for key exhaution
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
return nil, nil
}
// Remove the leaf node
nc := t.writeNode(n)
nc := t.writeNode(n, true)
nc.leaf = nil
// Check if this node should be merged
if n != t.root && len(nc.edges) == 1 {
nc.mergeChild()
t.mergeChild(nc)
}
return nc, n.leaf
}
@ -219,14 +364,17 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
return nil, nil
}
// Copy this node
nc := t.writeNode(n)
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via t.mergeChild(nc) if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
nc.mergeChild()
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
@ -234,6 +382,56 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
return nc, leaf
}
// deletePrefix does a recursive deletion of the subtree under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
// Check for key exhaustion
if len(search) == 0 {
nc := t.writeNode(n, true)
if n.isLeaf() {
nc.leaf = nil
}
nc.edges = nil
return nc, t.trackChannelsAndCount(n)
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
return nil, 0
}
// Consume the search prefix
if len(child.prefix) > len(search) {
search = []byte("")
} else {
search = search[len(child.prefix):]
}
newChild, numDeletions := t.deletePrefix(n, child, search)
if newChild == nil {
return nil, 0
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via t.mergeChild(nc) if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, numDeletions
}
// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
@ -261,6 +459,19 @@ func (t *Txn) Delete(k []byte) (interface{}, bool) {
return nil, false
}
// DeletePrefix is used to delete an entire subtree that matches the prefix
// This will delete all nodes under that prefix
func (t *Txn) DeletePrefix(prefix []byte) bool {
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
if newRoot != nil {
t.root = newRoot
t.size = t.size - numDeletions
return true
}
return false
}
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
@ -274,10 +485,115 @@ func (t *Txn) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// Commit is used to finalize the transaction and return a new tree
// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
return t.root.GetWatch(k)
}
// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
t.modified = nil
return &Tree{t.root, t.size}
nt := t.CommitOnly()
if t.trackMutate {
t.Notify()
}
return nt
}
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
nt := &Tree{t.root, t.size}
t.writable = nil
return nt
}
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
snapIter := t.snap.rawIterator()
rootIter := t.root.rawIterator()
for snapIter.Front() != nil || rootIter.Front() != nil {
// If we've exhausted the nodes in the old snapshot, we know
// there's nothing remaining to notify.
if snapIter.Front() == nil {
return
}
snapElem := snapIter.Front()
// If we've exhausted the nodes in the new root, we know we need
// to invalidate everything that remains in the old snapshot. We
// know from the loop condition there's something in the old
// snapshot.
if rootIter.Front() == nil {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// Do one string compare so we can check the various conditions
// below without repeating the compare.
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
// If the snapshot is behind the root, then we must have deleted
// this node during the transaction.
if cmp < 0 {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// If the snapshot is ahead of the root, then we must have added
// this node during the transaction.
if cmp > 0 {
rootIter.Next()
continue
}
// If we have the same path, then we need to see if we mutated a
// node and possibly the leaf.
rootElem := rootIter.Front()
if snapElem != rootElem {
close(snapElem.mutateCh)
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
close(snapElem.leaf.mutateCh)
}
}
snapIter.Next()
rootIter.Next()
}
}
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
if !t.trackMutate {
return
}
// If we've overflowed the tracking state we can't use it in any way and
// need to do a full tree compare.
if t.trackOverflow {
t.slowNotify()
} else {
for ch := range t.trackChannels {
close(ch)
}
}
// Clean up the tracking state so that a re-notify is safe (will trigger
// the else clause above which will be a no-op).
t.trackChannels = nil
t.trackOverflow = false
}
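
A minimal sketch of the tracking flow introduced above (TrackMutate, Commit, closed watch channels), assuming the library's usual import path:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("config/port"), 8080)

	// GetWatch returns a channel that is closed when the key (or a node on
	// its path) is mutated by a tracking transaction.
	watch, val, _ := r.Root().GetWatch([]byte("config/port"))
	fmt.Println("current:", val)

	txn := r.Txn()
	txn.TrackMutate(true) // enable notifications for this transaction
	txn.Insert([]byte("config/port"), 9090)
	r = txn.Commit() // CommitOnly + Notify under the hood

	<-watch // unblocks: the watched leaf was replaced
	v, _ := r.Root().Get([]byte("config/port"))
	fmt.Println("config/port changed, now:", v)
}
```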
// Insert is used to add or update a given key. The return provides
@ -296,6 +612,14 @@ func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
return txn.Commit(), old, ok
}
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
txn := t.Txn()
ok := txn.DeletePrefix(k)
return txn.Commit(), ok
}
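
And a short sketch of the new DeletePrefix, which drops an entire subtree in one transaction:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("config/port"), 8080)
	r, _, _ = r.Insert([]byte("config/host"), "localhost")
	r, _, _ = r.Insert([]byte("other"), true)

	// Remove every key under "config/"; ok reports whether anything matched.
	r, ok := r.DeletePrefix([]byte("config/"))
	fmt.Println(ok, r.Len()) // true 1
}
```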
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {

View file

@ -9,11 +9,13 @@ type Iterator struct {
stack []edges
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
// Wipe the stack
i.stack = nil
n := i.node
watch = n.mutateCh
search := prefix
for {
// Check for key exhaustion
@ -29,6 +31,9 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
return
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
@ -43,6 +48,11 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
}
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
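
A sketch of SeekPrefixWatch; it behaves like SeekPrefix but also hands back the mutate channel of the finest node covering the prefix:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("config/port"), 8080)
	r, _, _ = r.Insert([]byte("config/host"), "localhost")

	it := r.Root().Iterator()
	watch := it.SeekPrefixWatch([]byte("config/"))
	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
		fmt.Printf("%s = %v\n", k, v)
	}
	// watch is closed once a tracking transaction mutates anything at or
	// below the matched node.
	_ = watch
}
```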
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed

View file

@ -12,8 +12,9 @@ type WalkFn func(k []byte, v interface{}) bool
// leafNode is used to represent a value
type leafNode struct {
key []byte
val interface{}
mutateCh chan struct{}
key []byte
val interface{}
}
// edge is used to represent an edge node
@ -24,6 +25,9 @@ type edge struct {
// Node is an immutable node in the radix tree
type Node struct {
// mutateCh is closed if this node is modified
mutateCh chan struct{}
// leaf is used to store possible leaf
leaf *leafNode
@ -87,31 +91,14 @@ func (n *Node) delEdge(label byte) {
}
}
func (n *Node) mergeChild() {
e := n.edges[0]
child := e.node
n.prefix = concat(n.prefix, child.prefix)
if child.leaf != nil {
n.leaf = new(leafNode)
*n.leaf = *child.leaf
} else {
n.leaf = nil
}
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
func (n *Node) Get(k []byte) (interface{}, bool) {
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
search := k
watch := n.mutateCh
for {
// Check for key exhaution
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.val, true
return n.leaf.mutateCh, n.leaf.val, true
}
break
}
@ -122,6 +109,9 @@ func (n *Node) Get(k []byte) (interface{}, bool) {
break
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
@ -129,7 +119,12 @@ func (n *Node) Get(k []byte) (interface{}, bool) {
break
}
}
return nil, false
return watch, nil, false
}
func (n *Node) Get(k []byte) (interface{}, bool) {
_, val, ok := n.GetWatch(k)
return val, ok
}
// LongestPrefix is like Get, but instead of an
@ -204,6 +199,14 @@ func (n *Node) Iterator() *Iterator {
return &Iterator{node: n}
}
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
iter := &rawIterator{node: n}
iter.Next()
return iter
}
// Walk is used to walk the tree
func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn)
@ -271,6 +274,66 @@ func (n *Node) WalkPath(path []byte, fn WalkFn) {
}
}
func (n *Node) Seek(prefix []byte) *Seeker {
search := prefix
p := &pos{n: n}
for {
// Check for key exhaustion
if len(search) == 0 {
return &Seeker{p}
}
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= search[0]
})
p.current = idx
if idx < len(n.edges) {
n = n.edges[idx].node
if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 {
search = search[len(n.prefix):]
p.current++
p = &pos{n: n, prev: p}
continue
}
}
p.current++
return &Seeker{p}
}
}
type Seeker struct {
*pos
}
type pos struct {
n *Node
current int
prev *pos
isLeaf bool
}
func (s *Seeker) Next() (k []byte, v interface{}, ok bool) {
if s.current >= len(s.n.edges) {
if s.prev == nil {
return nil, nil, false
}
s.pos = s.prev
return s.Next()
}
edge := s.n.edges[s.current]
s.current++
if edge.node.leaf != nil && !s.isLeaf {
s.isLeaf = true
s.current--
return edge.node.leaf.key, edge.node.leaf.val, true
}
s.isLeaf = false
s.pos = &pos{n: edge.node, prev: s.pos}
return s.Next()
}
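
The Seek/Seeker API above looks like an addition in this vendored copy (the contenthash package below relies on it to scan directory records in key order). A rough sketch, hedged accordingly:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for _, k := range []string{"/a", "/a/b", "/a/c", "/b"} {
		r, _, _ = r.Insert([]byte(k), k)
	}

	// Seek positions at the given key; Next then walks forward in order,
	// so callers typically stop once keys leave the prefix of interest.
	s := r.Root().Seek([]byte("/a"))
	for {
		k, _, ok := s.Next()
		if !ok {
			break
		}
		fmt.Println(string(k))
	}
}
```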
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *Node, fn WalkFn) bool {

View file

@ -0,0 +1,78 @@
package iradix
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
// node is the starting node in the tree for the iterator.
node *Node
// stack keeps track of edges in the frontier.
stack []rawStackEntry
// pos is the current position of the iterator.
pos *Node
// path is the effective path of the current iterator position,
// regardless of whether the current node is a leaf.
path string
}
// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
path string
edges edges
}
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
return i.pos
}
// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
return i.path
}
// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
// Initialize our stack if needed.
if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{
rawStackEntry{
edges: edges{
edge{node: i.node},
},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack.
n := len(i.stack)
last := i.stack[n-1]
elem := last.edges[0].node
// Update the stack.
if len(last.edges) > 1 {
i.stack[n-1].edges = last.edges[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier.
if len(elem.edges) > 0 {
path := last.path + string(elem.prefix)
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
}
i.pos = elem
i.path = last.path + string(elem.prefix)
return
}
i.pos = nil
i.path = ""
}

21
vendor/github.com/mitchellh/hashstructure/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

65
vendor/github.com/mitchellh/hashstructure/README.md generated vendored Normal file
View file

@ -0,0 +1,65 @@
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
hashstructure is a Go library for creating a unique hash value
for arbitrary values in Go.
This can be used to key values in a hash (for use in a map, set, etc.)
that are complex. Common use cases include comparing two values without
sending the data across the network, caching values locally (de-duping), and so on.
## Features
* Hash any arbitrary Go value, including complex types.
* Tag a struct field to ignore it and not affect the hash value.
* Tag a slice type struct field to treat it as a set where ordering
doesn't affect the hash code but the field itself is still taken into
account to create the hash value.
* Optionally specify a custom hash function to optimize for speed, collision
avoidance for your data set, etc.
* Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
allowing effective hashing of time.Time
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/hashstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
A quick code example is shown below:
```go
type ComplexStruct struct {
Name string
Age uint
Metadata map[string]interface{}
}
v := ComplexStruct{
Name: "mitchellh",
Age: 64,
Metadata: map[string]interface{}{
"car": true,
"location": "California",
"siblings": []string{"Bob", "John"},
},
}
hash, err := hashstructure.Hash(v, nil)
if err != nil {
panic(err)
}
fmt.Printf("%d", hash)
// Output:
// 2307517237273902113
```

View file

@ -0,0 +1,358 @@
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
Field string
}
// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to FNV.
Hasher hash.Hash64
// TagName is the struct tag to look at when hashing the structure.
// By default this is "hash".
TagName string
// ZeroNil is a flag determining whether a nil pointer should be treated
// as equal to the zero value of the pointed-to type. By default this is false.
ZeroNil bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = fnv.New64()
}
if opts.TagName == "" {
opts.TagName = "hash"
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{
h: opts.Hasher,
tag: opts.TagName,
zeronil: opts.ZeroNil,
}
return w.visit(reflect.ValueOf(v), nil)
}
type walker struct {
h hash.Hash64
tag string
zeronil bool
}
type visitOpts struct {
// Flags are a bitmask of flags to affect behavior of this visit
Flags visitFlag
// Information about the struct containing this field
Struct interface{}
StructField string
}
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
t := reflect.TypeOf(0)
// Loop since these can be wrapped in multiple layers of pointers
// and interfaces.
for {
// If we have an interface, dereference it. We have to do this up
// here because it might be a nil in there and the check below must
// catch that.
if v.Kind() == reflect.Interface {
v = v.Elem()
continue
}
if v.Kind() == reflect.Ptr {
if w.zeronil {
t = v.Type().Elem()
}
v = reflect.Indirect(v)
continue
}
break
}
// If it is nil, treat it like a zero.
if !v.IsValid() {
v = reflect.Zero(t)
}
// binary.Write can't encode machine-sized ints, so we have to convert
// to a fixed-size int; we choose the largest...
switch v.Kind() {
case reflect.Int:
v = reflect.ValueOf(int64(v.Int()))
case reflect.Uint:
v = reflect.ValueOf(uint64(v.Uint()))
case reflect.Bool:
var tmp int8
if v.Bool() {
tmp = 1
}
v = reflect.ValueOf(tmp)
}
k := v.Kind()
// We can shortcut numeric values by directly binary writing them
if k >= reflect.Int && k <= reflect.Complex64 {
// A direct hash calculation
w.h.Reset()
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
return w.h.Sum64(), err
}
switch k {
case reflect.Array:
var h uint64
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
h = hashUpdateOrdered(w.h, h, current)
}
return h, nil
case reflect.Map:
var includeMap IncludableMap
if opts != nil && opts.Struct != nil {
if v, ok := opts.Struct.(IncludableMap); ok {
includeMap = v
}
}
// Build the hash for the map. We do this by XOR-ing all the key
// and value hashes. This makes it deterministic despite ordering.
var h uint64
for _, k := range v.MapKeys() {
v := v.MapIndex(k)
if includeMap != nil {
incl, err := includeMap.HashIncludeMap(
opts.StructField, k.Interface(), v.Interface())
if err != nil {
return 0, err
}
if !incl {
continue
}
}
kh, err := w.visit(k, nil)
if err != nil {
return 0, err
}
vh, err := w.visit(v, nil)
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
return h, nil
case reflect.Struct:
parent := v.Interface()
var include Includable
if impl, ok := parent.(Includable); ok {
include = impl
}
t := v.Type()
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
if err != nil {
return 0, err
}
l := v.NumField()
for i := 0; i < l; i++ {
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
var f visitFlag
fieldType := t.Field(i)
if fieldType.PkgPath != "" {
// Unexported
continue
}
tag := fieldType.Tag.Get(w.tag)
if tag == "ignore" || tag == "-" {
// Ignore this field
continue
}
// if string is set, use the string value
if tag == "string" {
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
innerV = reflect.ValueOf(impl.String())
} else {
return 0, &ErrNotStringer{
Field: v.Type().Field(i).Name,
}
}
}
// Check if we implement includable and check it
if include != nil {
incl, err := include.HashInclude(fieldType.Name, innerV)
if err != nil {
return 0, err
}
if !incl {
continue
}
}
switch tag {
case "set":
f |= visitFlagSet
}
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
if err != nil {
return 0, err
}
vh, err := w.visit(innerV, &visitOpts{
Flags: f,
Struct: parent,
StructField: fieldType.Name,
})
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
}
return h, nil
case reflect.Slice:
// We have two behaviors here. If it isn't a set, then we just
// visit all the elements. If it is a set, then we do a deterministic
// hash code.
var h uint64
var set bool
if opts != nil {
set = (opts.Flags & visitFlagSet) != 0
}
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
if set {
h = hashUpdateUnordered(h, current)
} else {
h = hashUpdateOrdered(w.h, h, current)
}
}
return h, nil
case reflect.String:
// Directly hash
w.h.Reset()
_, err := w.h.Write([]byte(v.String()))
return w.h.Sum64(), err
default:
return 0, fmt.Errorf("unknown kind to hash: %s", k)
}
}
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
// For ordered updates, use a real hash function
h.Reset()
// We just panic if the binary writes fail because we are writing
// a fixed-size integer, which should never fail.
e1 := binary.Write(h, binary.LittleEndian, a)
e2 := binary.Write(h, binary.LittleEndian, b)
if e1 != nil {
panic(e1)
}
if e2 != nil {
panic(e2)
}
return h.Sum64()
}
func hashUpdateUnordered(a, b uint64) uint64 {
return a ^ b
}
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint
const (
visitFlagInvalid visitFlag = iota
visitFlagSet = iota << 1
)

15
vendor/github.com/mitchellh/hashstructure/include.go generated vendored Normal file
View file

@ -0,0 +1,15 @@
package hashstructure
// Includable is an interface that can optionally be implemented by
// a struct. It will be called for each field in the struct to check whether
// it should be included in the hash.
type Includable interface {
HashInclude(field string, v interface{}) (bool, error)
}
// IncludableMap is an interface that can optionally be implemented by
// a struct. It will be called when a map-type field is found to ask the
// struct if the map item should be included in the hash.
type IncludableMap interface {
HashIncludeMap(field string, k, v interface{}) (bool, error)
}
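
A small sketch tying the struct tags and default options together (the struct and field names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Config struct {
	Name  string
	Peers []string `hash:"set"`    // ordering of Peers does not affect the hash
	Debug bool     `hash:"ignore"` // excluded from the hash entirely
}

func main() {
	a, _ := hashstructure.Hash(Config{Name: "n1", Peers: []string{"x", "y"}}, nil)
	b, _ := hashstructure.Hash(Config{Name: "n1", Peers: []string{"y", "x"}, Debug: true}, nil)
	fmt.Println(a == b) // true: set ordering and ignored fields don't matter
}
```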

File diff suppressed because it is too large

View file

@ -0,0 +1,121 @@
syntax = "proto3";
package moby.buildkit.v1;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service Control {
rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
rpc Prune(PruneRequest) returns (stream UsageRecord);
rpc Solve(SolveRequest) returns (SolveResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Session(stream BytesMessage) returns (stream BytesMessage);
rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
}
message PruneRequest {
// TODO: filter
}
message DiskUsageRequest {
string filter = 1; // FIXME: this should be containerd-compatible repeated string?
}
message DiskUsageResponse {
repeated UsageRecord record = 1;
}
message UsageRecord {
string ID = 1;
bool Mutable = 2;
bool InUse = 3;
int64 Size = 4;
string Parent = 5;
google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
int64 UsageCount = 8;
string Description = 9;
}
message SolveRequest {
string Ref = 1;
pb.Definition Definition = 2;
string Exporter = 3;
map<string, string> ExporterAttrs = 4;
string Session = 5;
string Frontend = 6;
map<string, string> FrontendAttrs = 7;
CacheOptions Cache = 8 [(gogoproto.nullable) = false];
}
message CacheOptions {
string ExportRef = 1;
repeated string ImportRefs = 2;
map<string, string> ExportAttrs = 3;
}
message SolveResponse {
map<string, string> ExporterResponse = 1;
}
message StatusRequest {
string Ref = 1;
}
message StatusResponse {
repeated Vertex vertexes = 1;
repeated VertexStatus statuses = 2;
repeated VertexLog logs = 3;
}
message Vertex {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
bool cached = 4;
google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
string error = 7; // typed errors?
}
message VertexStatus {
string ID = 1;
string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
int64 current = 4;
int64 total = 5;
// TODO: add started, completed
google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
}
message VertexLog {
string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
int64 stream = 3;
bytes msg = 4;
}
message BytesMessage {
bytes data = 1;
}
message ListWorkersRequest {
repeated string filter = 1; // containerd style
}
message ListWorkersResponse {
repeated WorkerRecord record = 1;
}
message WorkerRecord {
string ID = 1;
map<string, string> Labels = 2;
}
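
For orientation, a hedged sketch of calling the Control service through the generated gRPC client. The import path and listen address are assumptions, and the Size_ field name follows the gogo convention of suffixing fields that collide with generated methods:

```go
package main

import (
	"context"
	"fmt"

	controlapi "github.com/moby/buildkit/api/services/control" // assumed import path
	"google.golang.org/grpc"
)

func main() {
	// Placeholder address; buildkitd normally listens on a unix socket.
	conn, err := grpc.Dial("localhost:1234", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	c := controlapi.NewControlClient(conn)
	resp, err := c.DiskUsage(context.Background(), &controlapi.DiskUsageRequest{})
	if err != nil {
		panic(err)
	}
	for _, rec := range resp.Record {
		fmt.Println(rec.ID, rec.Size_)
	}
}
```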

View file

@ -0,0 +1,3 @@
package moby_buildkit_v1
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto

View file

@ -0,0 +1,634 @@
package contenthash
import (
"bytes"
"context"
"crypto/sha256"
"io"
"os"
"path"
"path/filepath"
"sync"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/locker"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)
var errNotFound = errors.Errorf("not found")
var defaultManager *cacheManager
var defaultManagerOnce sync.Once
const keyContentHash = "buildkit.contenthash.v0"
func getDefaultManager() *cacheManager {
defaultManagerOnce.Do(func() {
lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size
defaultManager = &cacheManager{lru: lru, locker: locker.New()}
})
return defaultManager
}
// Layout in the radix tree: every path is saved under its cleaned absolute
// unix path. Directories have two records: one holds the digest for the
// directory header, the other the recursive digest for the directory
// contents. "/dir/" is the record for the header, "/dir" for the contents.
// For the root node, "" (the empty string) is the key for the contents and
// "/" for the header.
func Checksum(ctx context.Context, ref cache.ImmutableRef, path string) (digest.Digest, error) {
return getDefaultManager().Checksum(ctx, ref, path)
}
func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
return getDefaultManager().GetCacheContext(ctx, md)
}
func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
return getDefaultManager().SetCacheContext(ctx, md, cc)
}
type CacheContext interface {
Checksum(ctx context.Context, ref cache.Mountable, p string) (digest.Digest, error)
HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
}
type Hashed interface {
Digest() digest.Digest
}
type cacheManager struct {
locker *locker.Locker
lru *simplelru.LRU
lruMu sync.Mutex
}
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) {
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()))
if err != nil {
return "", nil
}
return cc.Checksum(ctx, ref, p)
}
func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
cm.locker.Lock(md.ID())
cm.lruMu.Lock()
v, ok := cm.lru.Get(md.ID())
cm.lruMu.Unlock()
if ok {
cm.locker.Unlock(md.ID())
return v.(*cacheContext), nil
}
cc, err := newCacheContext(md)
if err != nil {
cm.locker.Unlock(md.ID())
return nil, err
}
cm.lruMu.Lock()
cm.lru.Add(md.ID(), cc)
cm.lruMu.Unlock()
cm.locker.Unlock(md.ID())
return cc, nil
}
func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error {
cc, ok := cci.(*cacheContext)
if !ok {
return errors.Errorf("invalid cachecontext: %T", cc)
}
if md.ID() != cc.md.ID() {
cc = &cacheContext{
md: md,
tree: cci.(*cacheContext).tree,
dirtyMap: map[string]struct{}{},
}
} else {
if err := cc.save(); err != nil {
return err
}
}
cm.lruMu.Lock()
cm.lru.Add(md.ID(), cc)
cm.lruMu.Unlock()
return nil
}
type cacheContext struct {
mu sync.RWMutex
md *metadata.StorageItem
tree *iradix.Tree
dirty bool // needs to be persisted to disk
// used in HandleChange
txn *iradix.Txn
node *iradix.Node
dirtyMap map[string]struct{}
}
type mount struct {
mountable cache.Mountable
mountPath string
unmount func() error
}
func (m *mount) mount(ctx context.Context) (string, error) {
if m.mountPath != "" {
return m.mountPath, nil
}
mounts, err := m.mountable.Mount(ctx, true)
if err != nil {
return "", err
}
lm := snapshot.LocalMounter(mounts)
mp, err := lm.Mount()
if err != nil {
return "", err
}
m.mountPath = mp
m.unmount = lm.Unmount
return mp, nil
}
func (m *mount) clean() error {
if m.mountPath != "" {
if err := m.unmount(); err != nil {
return err
}
m.mountPath = ""
}
return nil
}
func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) {
cc := &cacheContext{
md: md,
tree: iradix.New(),
dirtyMap: map[string]struct{}{},
}
if err := cc.load(); err != nil {
return nil, err
}
return cc, nil
}
func (cc *cacheContext) load() error {
dt, err := cc.md.GetExternal(keyContentHash)
if err != nil {
return nil
}
var l CacheRecords
if err := l.Unmarshal(dt); err != nil {
return err
}
txn := cc.tree.Txn()
for _, p := range l.Paths {
txn.Insert([]byte(p.Path), p.Record)
}
cc.tree = txn.Commit()
return nil
}
func (cc *cacheContext) save() error {
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn != nil {
cc.commitActiveTransaction()
}
var l CacheRecords
node := cc.tree.Root()
node.Walk(func(k []byte, v interface{}) bool {
l.Paths = append(l.Paths, &CacheRecordWithPath{
Path: string(k),
Record: v.(*CacheRecord),
})
return false
})
dt, err := l.Marshal()
if err != nil {
return err
}
return cc.md.SetExternal(keyContentHash, dt)
}
// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
p = path.Join("/", filepath.ToSlash(p))
if p == "/" {
p = ""
}
k := convertPathToKey([]byte(p))
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordTypeDir {
cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool {
cc.txn.Delete(k)
return false
})
}
}
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn == nil {
cc.txn = cc.tree.Txn()
cc.node = cc.tree.Root()
// HandleChange is never called for the root, so fake its records here
if _, ok := cc.node.Get([]byte{0}); !ok {
cc.txn.Insert([]byte{0}, &CacheRecord{
Type: CacheRecordTypeDirHeader,
Digest: digest.FromBytes(nil),
})
cc.txn.Insert([]byte(""), &CacheRecord{
Type: CacheRecordTypeDir,
})
}
}
if kind == fsutil.ChangeKindDelete {
v, ok := cc.txn.Delete(k)
if ok {
deleteDir(v.(*CacheRecord))
}
d := path.Dir(p)
if d == "/" {
d = ""
}
cc.dirtyMap[d] = struct{}{}
return
}
stat, ok := fi.Sys().(*fsutil.Stat)
if !ok {
return errors.Errorf("%s invalid change without stat information", p)
}
h, ok := fi.(Hashed)
if !ok {
return errors.Errorf("invalid fileinfo: %s", p)
}
v, ok := cc.node.Get(k)
if ok {
deleteDir(v.(*CacheRecord))
}
cr := &CacheRecord{
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordTypeSymlink
cr.Linkname = filepath.ToSlash(stat.Linkname)
}
if fi.IsDir() {
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordTypeDir,
}
cc.txn.Insert(k, cr2)
k = append(k, 0)
p += "/"
}
cr.Digest = h.Digest()
cc.txn.Insert(k, cr)
d := path.Dir(p)
if d == "/" {
d = ""
}
cc.dirtyMap[d] = struct{}{}
return nil
}
func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string) (digest.Digest, error) {
m := &mount{mountable: mountable}
defer m.clean()
const maxSymlinkLimit = 255
i := 0
for {
if i > maxSymlinkLimit {
return "", errors.Errorf("too many symlinks: %s", p)
}
cr, err := cc.checksumNoFollow(ctx, m, p)
if err != nil {
return "", err
}
if cr.Type == CacheRecordTypeSymlink {
link := cr.Linkname
if !path.IsAbs(cr.Linkname) {
link = path.Join(path.Dir(p), link)
}
i++
p = link
} else {
return cr.Digest, nil
}
}
}
func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
p = path.Join("/", filepath.ToSlash(p))
if p == "/" {
p = ""
}
cc.mu.RLock()
if cc.txn == nil {
root := cc.tree.Root()
cc.mu.RUnlock()
v, ok := root.Get(convertPathToKey([]byte(p)))
if ok {
cr := v.(*CacheRecord)
if cr.Digest != "" {
return cr, nil
}
}
} else {
cc.mu.RUnlock()
}
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn != nil {
cc.commitActiveTransaction()
}
defer func() {
if cc.dirty {
go cc.save()
cc.dirty = false
}
}()
return cc.lazyChecksum(ctx, m, p)
}
func (cc *cacheContext) commitActiveTransaction() {
for d := range cc.dirtyMap {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
k := convertPathToKey([]byte(d))
if _, ok := cc.txn.Get(k); ok {
cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
}
}
cc.tree = cc.txn.Commit()
cc.node = nil
cc.dirtyMap = map[string]struct{}{}
cc.txn = nil
}
func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
root := cc.tree.Root()
if cc.needsScan(root, p) {
if err := cc.scanPath(ctx, m, p); err != nil {
return nil, err
}
}
k := convertPathToKey([]byte(p))
txn := cc.tree.Txn()
root = txn.Root()
cr, updated, err := cc.checksum(ctx, root, txn, m, k)
if err != nil {
return nil, err
}
cc.tree = txn.Commit()
cc.dirty = updated
return cr, err
}
func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte) (*CacheRecord, bool, error) {
v, ok := root.Get(k)
if !ok {
return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k))
}
cr := v.(*CacheRecord)
if cr.Digest != "" {
return cr, false, nil
}
var dgst digest.Digest
switch cr.Type {
case CacheRecordTypeDir:
h := sha256.New()
next := append(k, 0)
iter := root.Seek(next)
subk := next
ok := true
for {
if !ok || !bytes.HasPrefix(subk, next) {
break
}
h.Write(bytes.TrimPrefix(subk, k))
subcr, _, err := cc.checksum(ctx, root, txn, m, subk)
if err != nil {
return nil, false, err
}
h.Write([]byte(subcr.Digest))
if subcr.Type == CacheRecordTypeDir { // skip subfiles
next := append(subk, 0, 0xff)
iter = root.Seek(next)
}
subk, _, ok = iter.Next()
}
dgst = digest.NewDigest(digest.SHA256, h)
default:
p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
target, err := m.mount(ctx)
if err != nil {
return nil, false, err
}
// no FollowSymlinkInScope because invalid paths should not be inserted
fp := filepath.Join(target, filepath.FromSlash(p))
fi, err := os.Lstat(fp)
if err != nil {
return nil, false, err
}
dgst, err = prepareDigest(fp, p, fi)
if err != nil {
return nil, false, err
}
}
cr2 := &CacheRecord{
Digest: dgst,
Type: cr.Type,
Linkname: cr.Linkname,
}
txn.Insert(k, cr2)
return cr2, true, nil
}
func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool {
if p == "/" {
p = ""
}
if _, ok := root.Get(convertPathToKey([]byte(p))); !ok {
if p == "" {
return true
}
return cc.needsScan(root, path.Clean(path.Dir(p)))
}
return false
}
func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
p = path.Join("/", p)
d, _ := path.Split(p)
mp, err := m.mount(ctx)
if err != nil {
return err
}
parentPath, err := fs.RootPath(mp, filepath.FromSlash(d))
if err != nil {
return err
}
n := cc.tree.Root()
txn := cc.tree.Txn()
err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return errors.Wrapf(err, "failed to walk %s", path)
}
rel, err := filepath.Rel(mp, path)
if err != nil {
return err
}
k := []byte(filepath.Join("/", filepath.ToSlash(rel)))
if string(k) == "/" {
k = []byte{}
}
k = convertPathToKey(k)
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordTypeSymlink
link, err := os.Readlink(path)
if err != nil {
return err
}
cr.Linkname = filepath.ToSlash(link)
}
if fi.IsDir() {
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordTypeDir,
}
txn.Insert(k, cr2)
k = append(k, 0)
}
txn.Insert(k, cr)
}
return nil
})
if err != nil {
return err
}
cc.tree = txn.Commit()
return nil
}
func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) {
h, err := NewFileHash(fp, fi)
if err != nil {
return "", errors.Wrapf(err, "failed to create hash for %s", p)
}
if fi.Mode().IsRegular() && fi.Size() > 0 {
// TODO: would be nice to put the contents to separate hash first
// so it can be cached for hardlinks
f, err := os.Open(fp)
if err != nil {
return "", errors.Wrapf(err, "failed to open %s", p)
}
defer f.Close()
if _, err := poolsCopy(h, f); err != nil {
return "", errors.Wrapf(err, "failed to copy file data for %s", p)
}
}
return digest.NewDigest(digest.SHA256, h), nil
}
func addParentToMap(d string, m map[string]struct{}) {
if d == "" {
return
}
d = path.Dir(d)
if d == "/" {
d = ""
}
m[d] = struct{}{}
addParentToMap(d, m)
}
func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem {
v := md.Get("cache.equalMutable") // TODO: const
if v == nil {
return md
}
var mutable string
if err := v.Unmarshal(&mutable); err != nil {
return md
}
si, ok := md.Storage().Get(mutable)
if ok {
return si
}
return md
}
var pool32K = sync.Pool{
New: func() interface{} { return make([]byte, 32*1024) }, // 32K
}
func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
buf := pool32K.Get().([]byte)
written, err = io.CopyBuffer(dst, src, buf)
pool32K.Put(buf)
return
}
func convertPathToKey(p []byte) []byte {
return bytes.Replace(p, []byte("/"), []byte{0}, -1)
}
func convertKeyToPath(p []byte) []byte {
return bytes.Replace(p, []byte{0}, []byte("/"), -1)
}
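
A tiny, package-internal sketch of the key layout described at the top of this file. The helpers are unexported, so this would live inside package contenthash (in a _test.go file); the example function name is hypothetical:

```go
package contenthash

import "fmt"

// ExampleKeyConversion is a hypothetical doc example: slashes become NUL
// bytes so a directory header ("/dir/") and its contents record ("/dir")
// stay adjacent in the radix tree's key order.
func ExampleKeyConversion() {
	k := convertPathToKey([]byte("/dir/file"))
	fmt.Printf("%q\n", k)
	fmt.Println(string(convertKeyToPath(k)))
	// Output:
	// "\x00dir\x00file"
	// /dir/file
}
```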

View file

@ -0,0 +1,755 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: checksum.proto
/*
Package contenthash is a generated protocol buffer package.
It is generated from these files:
checksum.proto
It has these top-level messages:
CacheRecord
CacheRecordWithPath
CacheRecords
*/
package contenthash
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CacheRecordType int32
const (
CacheRecordTypeFile CacheRecordType = 0
CacheRecordTypeDir CacheRecordType = 1
CacheRecordTypeDirHeader CacheRecordType = 2
CacheRecordTypeSymlink CacheRecordType = 3
)
var CacheRecordType_name = map[int32]string{
0: "FILE",
1: "DIR",
2: "DIR_HEADER",
3: "SYMLINK",
}
var CacheRecordType_value = map[string]int32{
"FILE": 0,
"DIR": 1,
"DIR_HEADER": 2,
"SYMLINK": 3,
}
func (x CacheRecordType) String() string {
return proto.EnumName(CacheRecordType_name, int32(x))
}
func (CacheRecordType) EnumDescriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
type CacheRecord struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"`
Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"`
}
func (m *CacheRecord) Reset() { *m = CacheRecord{} }
func (m *CacheRecord) String() string { return proto.CompactTextString(m) }
func (*CacheRecord) ProtoMessage() {}
func (*CacheRecord) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
func (m *CacheRecord) GetType() CacheRecordType {
if m != nil {
return m.Type
}
return CacheRecordTypeFile
}
func (m *CacheRecord) GetLinkname() string {
if m != nil {
return m.Linkname
}
return ""
}
type CacheRecordWithPath struct {
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
Record *CacheRecord `protobuf:"bytes,2,opt,name=record" json:"record,omitempty"`
}
func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} }
func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) }
func (*CacheRecordWithPath) ProtoMessage() {}
func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{1} }
func (m *CacheRecordWithPath) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *CacheRecordWithPath) GetRecord() *CacheRecord {
if m != nil {
return m.Record
}
return nil
}
type CacheRecords struct {
Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
}
func (m *CacheRecords) Reset() { *m = CacheRecords{} }
func (m *CacheRecords) String() string { return proto.CompactTextString(m) }
func (*CacheRecords) ProtoMessage() {}
func (*CacheRecords) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{2} }
func (m *CacheRecords) GetPaths() []*CacheRecordWithPath {
if m != nil {
return m.Paths
}
return nil
}
func init() {
proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord")
proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath")
proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords")
proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value)
}
func (m *CacheRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Digest) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
if m.Type != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Type))
}
if len(m.Linkname) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname)))
i += copy(dAtA[i:], m.Linkname)
}
return i, nil
}
func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Path) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
if m.Record != nil {
dAtA[i] = 0x12
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Record.Size()))
n1, err := m.Record.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *CacheRecords) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Paths) > 0 {
for _, msg := range m.Paths {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *CacheRecord) Size() (n int) {
var l int
_ = l
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Type != 0 {
n += 1 + sovChecksum(uint64(m.Type))
}
l = len(m.Linkname)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecordWithPath) Size() (n int) {
var l int
_ = l
l = len(m.Path)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Record != nil {
l = m.Record.Size()
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecords) Size() (n int) {
var l int
_ = l
if len(m.Paths) > 0 {
for _, e := range m.Paths {
l = e.Size()
n += 1 + l + sovChecksum(uint64(l))
}
}
return n
}
func sovChecksum(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozChecksum(x uint64) (n int) {
return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *CacheRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= (CacheRecordType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Linkname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Record == nil {
m.Record = &CacheRecord{}
}
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecords) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Paths = append(m.Paths, &CacheRecordWithPath{})
if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipChecksum(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthChecksum
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipChecksum(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("checksum.proto", fileDescriptorChecksum) }
var fileDescriptorChecksum = []byte{
// 418 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xd4, 0x40,
0x18, 0xc7, 0x77, 0xba, 0xeb, 0xaa, 0xdf, 0x4a, 0x0d, 0x53, 0x68, 0xc3, 0x50, 0xb2, 0xe3, 0x5e,
0x5c, 0x8a, 0xcd, 0x96, 0x08, 0xde, 0xad, 0xd9, 0xa5, 0xd1, 0x2a, 0x32, 0x15, 0x44, 0x3c, 0x48,
0x36, 0x3b, 0x66, 0x42, 0x9b, 0x4c, 0x48, 0x66, 0x0f, 0xfb, 0x06, 0x92, 0x93, 0x2f, 0x90, 0x93,
0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xd1, 0xb3, 0x87, 0x22, 0xeb, 0x8b, 0x48, 0x26, 0x55, 0x42, 0xca,
0x9e, 0xe6, 0xfb, 0x66, 0x7e, 0xdf, 0xff, 0xff, 0x9f, 0x61, 0x60, 0x3b, 0x10, 0x3c, 0x38, 0xcf,
0x97, 0xb1, 0x9d, 0x66, 0x52, 0x49, 0x3c, 0x08, 0x64, 0xa2, 0x78, 0xa2, 0x84, 0x9f, 0x0b, 0x72,
0x18, 0x46, 0x4a, 0x2c, 0xe7, 0x76, 0x20, 0xe3, 0x49, 0x28, 0x43, 0x39, 0xd1, 0xcc, 0x7c, 0xf9,
0x51, 0x77, 0xba, 0xd1, 0x55, 0x3d, 0x3b, 0xfa, 0x86, 0x60, 0xf0, 0xcc, 0x0f, 0x04, 0x67, 0x3c,
0x90, 0xd9, 0x02, 0x3f, 0x87, 0xfe, 0x22, 0x0a, 0x79, 0xae, 0x4c, 0x44, 0xd1, 0xf8, 0xee, 0xb1,
0x73, 0x79, 0x35, 0xec, 0xfc, 0xba, 0x1a, 0x1e, 0x34, 0x64, 0x65, 0xca, 0x93, 0xca, 0xd2, 0x8f,
0x12, 0x9e, 0xe5, 0x93, 0x50, 0x1e, 0xd6, 0x23, 0xb6, 0xab, 0x17, 0x76, 0xad, 0x80, 0x8f, 0xa0,
0xa7, 0x56, 0x29, 0x37, 0xb7, 0x28, 0x1a, 0x6f, 0x3b, 0xfb, 0x76, 0x23, 0xa6, 0xdd, 0xf0, 0x7c,
0xb3, 0x4a, 0x39, 0xd3, 0x24, 0x26, 0x70, 0xe7, 0x22, 0x4a, 0xce, 0x13, 0x3f, 0xe6, 0x66, 0xb7,
0xf2, 0x67, 0xff, 0xfb, 0xd1, 0x7b, 0xd8, 0x69, 0x0c, 0xbd, 0x8d, 0x94, 0x78, 0xed, 0x2b, 0x81,
0x31, 0xf4, 0x52, 0x5f, 0x89, 0x3a, 0x2e, 0xd3, 0x35, 0x3e, 0x82, 0x7e, 0xa6, 0x29, 0x6d, 0x3d,
0x70, 0xcc, 0x4d, 0xd6, 0xec, 0x9a, 0x1b, 0xcd, 0xe0, 0x5e, 0x63, 0x3b, 0xc7, 0x4f, 0xe0, 0x56,
0xa5, 0x94, 0x9b, 0x88, 0x76, 0xc7, 0x03, 0x87, 0x6e, 0x12, 0xf8, 0x17, 0x83, 0xd5, 0xf8, 0xc1,
0x0f, 0x04, 0xf7, 0x5b, 0x57, 0xc3, 0x0f, 0xa0, 0x37, 0xf3, 0x4e, 0xa7, 0x46, 0x87, 0xec, 0x15,
0x25, 0xdd, 0x69, 0x1d, 0xcf, 0xa2, 0x0b, 0x8e, 0x87, 0xd0, 0x75, 0x3d, 0x66, 0x20, 0xb2, 0x5b,
0x94, 0x14, 0xb7, 0x08, 0x37, 0xca, 0xf0, 0x23, 0x00, 0xd7, 0x63, 0x1f, 0x4e, 0xa6, 0x4f, 0xdd,
0x29, 0x33, 0xb6, 0xc8, 0x7e, 0x51, 0x52, 0xf3, 0x26, 0x77, 0xc2, 0xfd, 0x05, 0xcf, 0xf0, 0x43,
0xb8, 0x7d, 0xf6, 0xee, 0xe5, 0xa9, 0xf7, 0xea, 0x85, 0xd1, 0x25, 0xa4, 0x28, 0xe9, 0x6e, 0x0b,
0x3d, 0x5b, 0xc5, 0xd5, 0xbb, 0x92, 0xbd, 0x4f, 0x5f, 0xac, 0xce, 0xf7, 0xaf, 0x56, 0x3b, 0xf3,
0xb1, 0x71, 0xb9, 0xb6, 0xd0, 0xcf, 0xb5, 0x85, 0x7e, 0xaf, 0x2d, 0xf4, 0xf9, 0x8f, 0xd5, 0x99,
0xf7, 0xf5, 0x7f, 0x79, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf2, 0x2e, 0x06, 0x7d, 0x02,
0x00, 0x00,
}



@ -0,0 +1,30 @@
syntax = "proto3";
package contenthash;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
enum CacheRecordType {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "CacheRecordType";
FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"];
DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"];
DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"];
SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"];
}
message CacheRecord {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
CacheRecordType type = 2;
string linkname = 3;
}
message CacheRecordWithPath {
string path = 1;
CacheRecord record = 2;
}
message CacheRecords {
repeated CacheRecordWithPath paths = 1;
}
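The generated Go types above mirror this schema one to one; a minimal round-trip sketch (assuming the contenthash package and the go-digest import; the values are illustrative):

import digest "github.com/opencontainers/go-digest"

func exampleRoundTrip() (*CacheRecords, error) {
	recs := &CacheRecords{
		Paths: []*CacheRecordWithPath{{
			Path: "/foo/link",
			Record: &CacheRecord{
				Digest:   digest.FromString("example"), // illustrative digest
				Type:     CacheRecordTypeSymlink,
				Linkname: "/foo/target",
			},
		}},
	}
	dt, err := recs.Marshal() // generated wire-format marshaler
	if err != nil {
		return nil, err
	}
	var out CacheRecords
	if err := out.Unmarshal(dt); err != nil {
		return nil, err
	}
	return &out, nil
}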


@ -0,0 +1,98 @@
package contenthash
import (
"archive/tar"
"crypto/sha256"
"hash"
"os"
"path/filepath"
"time"
"github.com/tonistiigi/fsutil"
)
// NewFileHash returns new hash that is used for the builder cache keys
func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) {
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return nil, err
}
}
stat := &fsutil.Stat{
Mode: uint32(fi.Mode()),
Size_: fi.Size(),
ModTime: fi.ModTime().UnixNano(),
Linkname: link,
}
if fi.Mode()&os.ModeSymlink != 0 {
stat.Mode = stat.Mode | 0777
}
if err := setUnixOpt(path, fi, stat); err != nil {
return nil, err
}
return NewFromStat(stat)
}
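A usage sketch for NewFileHash: lstat the file, let the returned hash absorb the synthesized header, then stream the content through it (io and encoding/hex are assumed imports; the helper is illustrative):

func exampleFileChecksum(path string) (string, error) {
	fi, err := os.Lstat(path) // don't follow symlinks; mode and linkname feed the header
	if err != nil {
		return "", err
	}
	h, err := NewFileHash(path, fi)
	if err != nil {
		return "", err
	}
	if fi.Mode().IsRegular() {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil { // content bytes follow the header
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}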
func NewFromStat(stat *fsutil.Stat) (hash.Hash, error) {
fi := &statInfo{stat}
hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
if err != nil {
return nil, err
}
hdr.Name = "" // note: empty name is different from current has in docker build. Name is added on recursive directory scan instead
hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode)))
hdr.Devmajor = stat.Devmajor
hdr.Devminor = stat.Devminor
if len(stat.Xattrs) > 0 {
hdr.Xattrs = make(map[string]string, len(stat.Xattrs))
for k, v := range stat.Xattrs {
hdr.Xattrs[k] = string(v)
}
}
// fmt.Printf("hdr: %#v\n", hdr)
tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
tsh.Reset() // initialize header
return tsh, nil
}
type tarsumHash struct {
hash.Hash
hdr *tar.Header
}
// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
// comply with hash.Hash and reset to the state hash had before any writes
tsh.Hash.Reset()
WriteV1TarsumHeaders(tsh.hdr, tsh.Hash)
}
type statInfo struct {
*fsutil.Stat
}
func (s *statInfo) Name() string {
return filepath.Base(s.Stat.Path)
}
func (s *statInfo) Size() int64 {
return s.Stat.Size_
}
func (s *statInfo) Mode() os.FileMode {
return os.FileMode(s.Stat.Mode)
}
func (s *statInfo) ModTime() time.Time {
return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9)
}
func (s *statInfo) IsDir() bool {
return s.Mode().IsDir()
}
func (s *statInfo) Sys() interface{} {
return s.Stat
}


@ -0,0 +1,47 @@
// +build !windows
package contenthash
import (
"os"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/tonistiigi/fsutil"
"golang.org/x/sys/unix"
)
func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
return perm
}
func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error {
s := fi.Sys().(*syscall.Stat_t)
stat.Uid = s.Uid
stat.Gid = s.Gid
if !fi.IsDir() {
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
stat.Devmajor = int64(unix.Major(uint64(s.Rdev)))
stat.Devminor = int64(unix.Minor(uint64(s.Rdev)))
}
}
attrs, err := sysx.LListxattr(path)
if err != nil {
return err
}
if len(attrs) > 0 {
stat.Xattrs = map[string][]byte{}
for _, attr := range attrs {
v, err := sysx.LGetxattr(path, attr)
if err == nil {
stat.Xattrs[attr] = v
}
}
}
return nil
}


@ -0,0 +1,23 @@
// +build windows
package contenthash
import (
"os"
"github.com/tonistiigi/fsutil"
)
// chmodWindowsTarEntry adjusts the file permissions used in the tar
// header based on the platform on which the archive is created.
func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
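For example, a 0644 file comes out as 0755: the 0755 mask leaves 0644 unchanged, and OR-ing in 0111 adds the execute bits, so chmodWindowsTarEntry(0644) == 0755.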
func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error {
return nil
}


@ -0,0 +1,3 @@
package contenthash
//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. checksum.proto


@ -0,0 +1,60 @@
package contenthash
import (
"archive/tar"
"io"
"sort"
"strconv"
)
// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format.
func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) {
for _, elem := range v1TarHeaderSelect(h) {
w.Write([]byte(elem[0] + elem[1]))
}
}
// Functions below are from docker legacy tarsum implementation.
// There is no valid technical reason to continue using them.
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
return [][2]string{
{"name", h.Name},
{"mode", strconv.FormatInt(h.Mode, 10)},
{"uid", strconv.Itoa(h.Uid)},
{"gid", strconv.Itoa(h.Gid)},
{"size", strconv.FormatInt(h.Size, 10)},
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
{"typeflag", string([]byte{h.Typeflag})},
{"linkname", h.Linkname},
{"uname", h.Uname},
{"gname", h.Gname},
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
{"devminor", strconv.FormatInt(h.Devminor, 10)},
}
}
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
// Get extended attributes.
xAttrKeys := make([]string, 0, len(h.Xattrs))
for k := range h.Xattrs {
xAttrKeys = append(xAttrKeys, k)
}
sort.Strings(xAttrKeys)
// Make the slice with enough capacity to hold the 11 basic headers
// we want from the v0 selector plus however many xattrs we have.
orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
v0headers := v0TarHeaderSelect(h)
orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
orderedHeaders = append(orderedHeaders, v0headers[6:]...)
// Finally, append the sorted xattrs.
for _, k := range xAttrKeys {
orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
}
return
}
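Combined with a content hash, the v1 selector yields a tarsum-style digest for a single entry; a sketch (hdr and r are assumed inputs; crypto/sha256 and encoding/hex are assumed imports):

func exampleEntryDigest(hdr *tar.Header, r io.Reader) (string, error) {
	h := sha256.New()
	WriteV1TarsumHeaders(hdr, h) // seed the hash with the selected header fields
	if _, err := io.Copy(h, r); err != nil { // then the entry's content bytes
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}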

vendor/github.com/moby/buildkit/cache/fsutil.go generated vendored Normal file

@ -0,0 +1,71 @@
package cache
import (
"context"
"io"
"io/ioutil"
"os"
"github.com/containerd/continuity/fs"
"github.com/moby/buildkit/snapshot"
)
type ReadRequest struct {
Filename string
Range *FileRange
}
type FileRange struct {
Offset int
Length int
}
func ReadFile(ctx context.Context, ref ImmutableRef, req ReadRequest) ([]byte, error) {
mount, err := ref.Mount(ctx, true)
if err != nil {
return nil, err
}
lm := snapshot.LocalMounter(mount)
root, err := lm.Mount()
if err != nil {
return nil, err
}
defer func() {
if lm != nil {
lm.Unmount()
}
}()
fp, err := fs.RootPath(root, req.Filename)
if err != nil {
return nil, err
}
var dt []byte
if req.Range == nil {
dt, err = ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
} else {
f, err := os.Open(fp)
if err != nil {
return nil, err
}
dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
f.Close()
if err != nil {
return nil, err
}
}
if err := lm.Unmount(); err != nil {
return nil, err
}
lm = nil
return dt, err
}
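An illustrative call: a nil Range reads the whole file, otherwise only the requested window comes back (the ref comes from the cache manager; the filename is hypothetical):

func exampleReadHead(ctx context.Context, ref ImmutableRef) ([]byte, error) {
	return ReadFile(ctx, ref, ReadRequest{
		Filename: "etc/os-release",
		Range:    &FileRange{Offset: 0, Length: 512}, // first 512 bytes only
	})
}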

vendor/github.com/moby/buildkit/cache/gc.go generated vendored Normal file

@ -0,0 +1,27 @@
package cache
import (
"context"
"errors"
"time"
)
// GCPolicy defines policy for garbage collection
type GCPolicy struct {
MaxSize uint64
MaxKeepDuration time.Duration
}
// // CachePolicy defines policy for keeping a resource in cache
// type CachePolicy struct {
// Priority int
// LastUsed time.Time
// }
//
// func defaultCachePolicy() CachePolicy {
// return CachePolicy{Priority: 10, LastUsed: time.Now()}
// }
func (cm *cacheManager) GC(ctx context.Context) error {
return errors.New("GC not implemented")
}

vendor/github.com/moby/buildkit/cache/manager.go generated vendored Normal file

@ -0,0 +1,573 @@
package cache
import (
"context"
"strings"
"sync"
"time"
"github.com/containerd/containerd/snapshots"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
var (
errLocked = errors.New("locked")
errNotFound = errors.New("not found")
errInvalid = errors.New("invalid")
)
type ManagerOpt struct {
Snapshotter snapshot.SnapshotterBase
GCPolicy GCPolicy
MetadataStore *metadata.Store
}
type Accessor interface {
Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error)
GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase?
}
type Controller interface {
DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error)
Prune(ctx context.Context, ch chan client.UsageInfo) error
GC(ctx context.Context) error
}
type Manager interface {
Accessor
Controller
Close() error
}
type cacheManager struct {
records map[string]*cacheRecord
mu sync.Mutex
ManagerOpt
md *metadata.Store
muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results
}
func NewManager(opt ManagerOpt) (Manager, error) {
cm := &cacheManager{
ManagerOpt: opt,
md: opt.MetadataStore,
records: make(map[string]*cacheRecord),
}
if err := cm.init(context.TODO()); err != nil {
return nil, err
}
// cm.scheduleGC(5 * time.Minute)
return cm, nil
}
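Wiring a manager together might look like the sketch below; the snapshotter is assumed to come from the worker setup and the database path is illustrative:

func exampleNewManager(sn snapshot.SnapshotterBase) (Manager, error) {
	md, err := metadata.NewStore("/var/lib/buildkit/metadata.db") // hypothetical path
	if err != nil {
		return nil, err
	}
	return NewManager(ManagerOpt{
		Snapshotter:   sn,
		MetadataStore: md,
	})
}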
// init loads all snapshots from metadata state and tries to load the records
// from the snapshotter. If a snapshot can't be found, its metadata is deleted as well.
func (cm *cacheManager) init(ctx context.Context) error {
items, err := cm.md.All()
if err != nil {
return err
}
for _, si := range items {
if _, err := cm.getRecord(ctx, si.ID(), false); err != nil {
logrus.Debugf("could not load snapshot %s: %v", si.ID(), err)
cm.md.Clear(si.ID())
// TODO: make sure content is deleted as well
}
}
return nil
}
// Close closes the manager and releases the metadata database lock. No other
// method should be called after Close.
func (cm *cacheManager) Close() error {
// TODO: allocate internal context and cancel it here
return cm.md.Close()
}
// Get returns an immutable snapshot reference for ID
func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
return cm.get(ctx, id, false, opts...)
}
// GetFromSnapshotter returns an immutable snapshot reference for ID, falling back to the snapshotter if there is no metadata record
func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
return cm.get(ctx, id, true, opts...)
}
// get requires manager lock to be taken
func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (ImmutableRef, error) {
rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...)
if err != nil {
return nil, err
}
rec.mu.Lock()
defer rec.mu.Unlock()
if rec.mutable {
if len(rec.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
if rec.equalImmutable != nil {
return rec.equalImmutable.ref(), nil
}
return rec.mref().commit(ctx)
}
return rec.ref(), nil
}
// getRecord returns record for id. Requires manager lock.
func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (cr *cacheRecord, retErr error) {
if rec, ok := cm.records[id]; ok {
if rec.isDead() {
return nil, errNotFound
}
return rec, nil
}
md, ok := cm.md.Get(id)
if !ok && !fromSnapshotter {
return nil, errNotFound
}
if mutableID := getEqualMutable(md); mutableID != "" {
mutable, err := cm.getRecord(ctx, mutableID, fromSnapshotter)
if err != nil {
// check loading mutable deleted record from disk
if errors.Cause(err) == errNotFound {
cm.md.Clear(id)
}
return nil, err
}
rec := &cacheRecord{
mu: &sync.Mutex{},
cm: cm,
refs: make(map[Mountable]struct{}),
parent: mutable.Parent(),
md: md,
equalMutable: &mutableRef{cacheRecord: mutable},
}
mutable.equalImmutable = &immutableRef{cacheRecord: rec}
cm.records[id] = rec
return rec, nil
}
info, err := cm.Snapshotter.Stat(ctx, id)
if err != nil {
return nil, errors.Wrap(errNotFound, err.Error())
}
var parent ImmutableRef
if info.Parent != "" {
parent, err = cm.get(ctx, info.Parent, fromSnapshotter, opts...)
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
parent.Release(context.TODO())
}
}()
}
rec := &cacheRecord{
mu: &sync.Mutex{},
mutable: info.Kind != snapshots.KindCommitted,
cm: cm,
refs: make(map[Mountable]struct{}),
parent: parent,
md: md,
}
// the record was deleted but we crashed before data on disk was removed
if getDeleted(md) {
if err := rec.remove(ctx, true); err != nil {
return nil, err
}
return nil, errNotFound
}
if err := initializeMetadata(rec, opts...); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, err
}
cm.records[id] = rec
return rec, nil
}
func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) {
id := identity.NewID()
var parent ImmutableRef
var parentID string
if s != nil {
var err error
parent, err = cm.Get(ctx, s.ID())
if err != nil {
return nil, err
}
if err := parent.Finalize(ctx); err != nil {
return nil, err
}
parentID = parent.ID()
}
if err := cm.Snapshotter.Prepare(ctx, id, parentID); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, errors.Wrapf(err, "failed to prepare %s", id)
}
md, _ := cm.md.Get(id)
rec := &cacheRecord{
mu: &sync.Mutex{},
mutable: true,
cm: cm,
refs: make(map[Mountable]struct{}),
parent: parent,
md: md,
}
if err := initializeMetadata(rec, opts...); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, err
}
cm.mu.Lock()
defer cm.mu.Unlock()
cm.records[id] = rec // TODO: save to db
return rec.mref(), nil
}
func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
rec, err := cm.getRecord(ctx, id, false)
if err != nil {
return nil, err
}
rec.mu.Lock()
defer rec.mu.Unlock()
if !rec.mutable {
return nil, errors.Wrapf(errInvalid, "%s is not mutable", id)
}
if len(rec.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
if rec.equalImmutable != nil {
if len(rec.equalImmutable.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
delete(cm.records, rec.equalImmutable.ID())
if err := rec.equalImmutable.remove(ctx, false); err != nil {
return nil, err
}
rec.equalImmutable = nil
}
return rec.mref(), nil
}
func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo) error {
cm.muPrune.Lock()
defer cm.muPrune.Unlock()
return cm.prune(ctx, ch)
}
func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) error {
var toDelete []*cacheRecord
cm.mu.Lock()
for _, cr := range cm.records {
cr.mu.Lock()
// ignore duplicates that share data
if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 {
cr.mu.Unlock()
continue
}
if cr.isDead() {
cr.mu.Unlock()
continue
}
if len(cr.refs) == 0 {
cr.dead = true
toDelete = append(toDelete, cr)
// mark metadata as deleted in case we crash before cleanup finished
if err := setDeleted(cr.md); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
}
cr.mu.Unlock()
}
cm.mu.Unlock()
if len(toDelete) == 0 {
return nil
}
var err error
for _, cr := range toDelete {
cr.mu.Lock()
usageCount, lastUsedAt := getLastUsed(cr.md)
c := client.UsageInfo{
ID: cr.ID(),
Mutable: cr.mutable,
InUse: len(cr.refs) > 0,
Size: getSize(cr.md),
CreatedAt: GetCreatedAt(cr.md),
Description: GetDescription(cr.md),
LastUsedAt: lastUsedAt,
UsageCount: usageCount,
}
if cr.parent != nil {
c.Parent = cr.parent.ID()
}
if c.Size == sizeUnknown {
cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead
s, err := cr.Size(ctx)
if err != nil {
return err
}
c.Size = s
cr.mu.Lock()
}
if cr.equalImmutable != nil {
if err1 := cr.equalImmutable.remove(ctx, false); err == nil {
err = err1
}
}
if err1 := cr.remove(ctx, true); err == nil {
err = err1
}
if err == nil && ch != nil {
ch <- c
}
cr.mu.Unlock()
}
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
return cm.prune(ctx, ch)
}
}
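The caller owns the channel passed to Prune: it receives one client.UsageInfo per removed record, is typically drained in a goroutine, and is closed by the caller once Prune returns (a sketch; the logging is illustrative):

func examplePrune(ctx context.Context, cm Manager) error {
	ch := make(chan client.UsageInfo)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for ui := range ch {
			logrus.Infof("pruned %s: reclaimed %d bytes", ui.ID, ui.Size)
		}
	}()
	err := cm.Prune(ctx, ch)
	close(ch)
	<-done
	return err
}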
func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
cm.mu.Lock()
type cacheUsageInfo struct {
refs int
parent string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
}
m := make(map[string]*cacheUsageInfo, len(cm.records))
rescan := make(map[string]struct{}, len(cm.records))
for id, cr := range cm.records {
cr.mu.Lock()
// ignore duplicates that share data
if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 {
cr.mu.Unlock()
continue
}
usageCount, lastUsedAt := getLastUsed(cr.md)
c := &cacheUsageInfo{
refs: len(cr.refs),
mutable: cr.mutable,
size: getSize(cr.md),
createdAt: GetCreatedAt(cr.md),
usageCount: usageCount,
lastUsedAt: lastUsedAt,
description: GetDescription(cr.md),
doubleRef: cr.equalImmutable != nil,
}
if cr.parent != nil {
c.parent = cr.parent.ID()
}
if cr.mutable && c.refs > 0 {
c.size = 0 // size cannot be determined because it is changing
}
m[id] = c
rescan[id] = struct{}{}
cr.mu.Unlock()
}
cm.mu.Unlock()
for {
if len(rescan) == 0 {
break
}
for id := range rescan {
v := m[id]
if v.refs == 0 && v.parent != "" {
m[v.parent].refs--
if v.doubleRef {
m[v.parent].refs--
}
rescan[v.parent] = struct{}{}
}
delete(rescan, id)
}
}
var du []*client.UsageInfo
for id, cr := range m {
if opt.Filter != "" && !strings.HasPrefix(id, opt.Filter) {
continue
}
c := &client.UsageInfo{
ID: id,
Mutable: cr.mutable,
InUse: cr.refs > 0,
Size: cr.size,
Parent: cr.parent,
CreatedAt: cr.createdAt,
Description: cr.description,
LastUsedAt: cr.lastUsedAt,
UsageCount: cr.usageCount,
}
du = append(du, c)
}
eg, ctx := errgroup.WithContext(ctx)
for _, d := range du {
if d.Size == sizeUnknown {
func(d *client.UsageInfo) {
eg.Go(func() error {
ref, err := cm.Get(ctx, d.ID)
if err != nil {
d.Size = 0
return nil
}
s, err := ref.Size(ctx)
if err != nil {
return err
}
d.Size = s
return ref.Release(context.TODO())
})
}(d)
}
}
if err := eg.Wait(); err != nil {
return du, err
}
return du, nil
}
func IsLocked(err error) bool {
return errors.Cause(err) == errLocked
}
func IsNotFound(err error) bool {
return errors.Cause(err) == errNotFound
}
type RefOption func(withMetadata) error
type cachePolicy int
const (
cachePolicyDefault cachePolicy = iota
cachePolicyRetain
)
type withMetadata interface {
Metadata() *metadata.StorageItem
}
func HasCachePolicyRetain(m withMetadata) bool {
return getCachePolicy(m.Metadata()) == cachePolicyRetain
}
func CachePolicyRetain(m withMetadata) error {
return queueCachePolicy(m.Metadata(), cachePolicyRetain)
}
func WithDescription(descr string) RefOption {
return func(m withMetadata) error {
return queueDescription(m.Metadata(), descr)
}
}
func WithCreationTime(tm time.Time) RefOption {
return func(m withMetadata) error {
return queueCreatedAt(m.Metadata(), tm)
}
}
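Because CachePolicyRetain already has the RefOption signature, these options compose directly when creating a ref (a sketch; the description is illustrative):

func exampleNewWithOptions(ctx context.Context, cm Manager, parent ImmutableRef) (MutableRef, error) {
	return cm.New(ctx, parent, // parent may be nil for a base snapshot
		WithDescription("local source"),
		WithCreationTime(time.Now()),
		CachePolicyRetain, // keep the data when the mutable ref is released
	)
}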
func initializeMetadata(m withMetadata, opts ...RefOption) error {
md := m.Metadata()
if tm := GetCreatedAt(md); !tm.IsZero() {
return nil
}
if err := queueCreatedAt(md, time.Now()); err != nil {
return err
}
for _, opt := range opts {
if err := opt(m); err != nil {
return err
}
}
return md.Commit()
}

vendor/github.com/moby/buildkit/cache/metadata.go generated vendored Normal file

@ -0,0 +1,206 @@
package cache
import (
"time"
"github.com/boltdb/bolt"
"github.com/moby/buildkit/cache/metadata"
"github.com/pkg/errors"
)
const sizeUnknown int64 = -1
const keySize = "snapshot.size"
const keyEqualMutable = "cache.equalMutable"
const keyCachePolicy = "cache.cachePolicy"
const keyDescription = "cache.description"
const keyCreatedAt = "cache.createdAt"
const keyLastUsedAt = "cache.lastUsedAt"
const keyUsageCount = "cache.usageCount"
const keyDeleted = "cache.deleted"
func setDeleted(si *metadata.StorageItem) error {
v, err := metadata.NewValue(true)
if err != nil {
return errors.Wrap(err, "failed to create size value")
}
si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, keyDeleted, v)
})
return nil
}
func getDeleted(si *metadata.StorageItem) bool {
v := si.Get(keyDeleted)
if v == nil {
return false
}
var deleted bool
if err := v.Unmarshal(&deleted); err != nil {
return false
}
return deleted
}
func setSize(si *metadata.StorageItem, s int64) error {
v, err := metadata.NewValue(s)
if err != nil {
return errors.Wrap(err, "failed to create size value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keySize, v)
})
return nil
}
func getSize(si *metadata.StorageItem) int64 {
v := si.Get(keySize)
if v == nil {
return sizeUnknown
}
var size int64
if err := v.Unmarshal(&size); err != nil {
return sizeUnknown
}
return size
}
func getEqualMutable(si *metadata.StorageItem) string {
v := si.Get(keyEqualMutable)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return str
}
func setEqualMutable(si *metadata.StorageItem, s string) error {
v, err := metadata.NewValue(s)
if err != nil {
return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable)
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyEqualMutable, v)
})
return nil
}
func clearEqualMutable(si *metadata.StorageItem) error {
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyEqualMutable, nil)
})
return nil
}
func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error {
v, err := metadata.NewValue(p)
if err != nil {
return errors.Wrap(err, "failed to create cachePolicy value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyCachePolicy, v)
})
return nil
}
func getCachePolicy(si *metadata.StorageItem) cachePolicy {
v := si.Get(keyCachePolicy)
if v == nil {
return cachePolicyDefault
}
var p cachePolicy
if err := v.Unmarshal(&p); err != nil {
return cachePolicyDefault
}
return p
}
func queueDescription(si *metadata.StorageItem, descr string) error {
v, err := metadata.NewValue(descr)
if err != nil {
return errors.Wrap(err, "failed to create description value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyDescription, v)
})
return nil
}
func GetDescription(si *metadata.StorageItem) string {
v := si.Get(keyDescription)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return str
}
func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error {
v, err := metadata.NewValue(tm.UnixNano())
if err != nil {
return errors.Wrap(err, "failed to create createdAt value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyCreatedAt, v)
})
return nil
}
func GetCreatedAt(si *metadata.StorageItem) time.Time {
v := si.Get(keyCreatedAt)
if v == nil {
return time.Time{}
}
var tm int64
if err := v.Unmarshal(&tm); err != nil {
return time.Time{}
}
return time.Unix(tm/1e9, tm%1e9)
}
func getLastUsed(si *metadata.StorageItem) (int, *time.Time) {
v := si.Get(keyUsageCount)
if v == nil {
return 0, nil
}
var usageCount int
if err := v.Unmarshal(&usageCount); err != nil {
return 0, nil
}
v = si.Get(keyLastUsedAt)
if v == nil {
return usageCount, nil
}
var lastUsedTs int64
if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 {
return usageCount, nil
}
tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9)
return usageCount, &tm
}
func updateLastUsed(si *metadata.StorageItem) error {
count, _ := getLastUsed(si)
count++
v, err := metadata.NewValue(count)
if err != nil {
return errors.Wrap(err, "failed to create usageCount value")
}
v2, err := metadata.NewValue(time.Now().UnixNano())
if err != nil {
return errors.Wrap(err, "failed to create lastUsedAt value")
}
return si.Update(func(b *bolt.Bucket) error {
if err := si.SetValue(b, keyUsageCount, v); err != nil {
return err
}
return si.SetValue(b, keyLastUsedAt, v2)
})
}


@ -0,0 +1,382 @@
package metadata
import (
"bytes"
"encoding/json"
"strings"
"sync"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
mainBucket = "_main"
indexBucket = "_index"
externalBucket = "_external"
)
var errNotFound = errors.Errorf("not found")
type Store struct {
db *bolt.DB
}
func NewStore(dbPath string) (*Store, error) {
db, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
}
return &Store{db: db}, nil
}
func (s *Store) DB() *bolt.DB {
return s.db
}
func (s *Store) All() ([]*StorageItem, error) {
var out []*StorageItem
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return nil
}
return b.ForEach(func(key, _ []byte) error {
b := b.Bucket(key)
if b == nil {
return nil
}
si, err := newStorageItem(string(key), b, s)
if err != nil {
return err
}
out = append(out, si)
return nil
})
})
return out, err
}
func (s *Store) Probe(index string) (bool, error) {
var exists bool
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(indexBucket))
if b == nil {
return nil
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
search := []byte(indexKey(index, ""))
c := b.Cursor()
k, _ := c.Seek(search)
if k != nil && bytes.HasPrefix(k, search) {
exists = true
}
return nil
})
return exists, err
}
func (s *Store) Search(index string) ([]*StorageItem, error) {
var out []*StorageItem
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(indexBucket))
if b == nil {
return nil
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
index = indexKey(index, "")
c := b.Cursor()
k, _ := c.Seek([]byte(index))
for {
if k != nil && strings.HasPrefix(string(k), index) {
itemID := strings.TrimPrefix(string(k), index)
k, _ = c.Next()
b := main.Bucket([]byte(itemID))
if b == nil {
logrus.Errorf("index pointing to missing record %s", itemID)
continue
}
si, err := newStorageItem(itemID, b, s)
if err != nil {
return err
}
out = append(out, si)
} else {
break
}
}
return nil
})
return out, err
}
func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error {
return s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return errors.WithStack(errNotFound)
}
b = b.Bucket([]byte(id))
if b == nil {
return errors.WithStack(errNotFound)
}
return fn(b)
})
}
func (s *Store) Clear(id string) error {
return s.db.Update(func(tx *bolt.Tx) error {
external := tx.Bucket([]byte(externalBucket))
if external != nil {
external.DeleteBucket([]byte(id))
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
b := main.Bucket([]byte(id))
if b == nil {
return nil
}
si, err := newStorageItem(id, b, s)
if err != nil {
return err
}
if indexes := si.Indexes(); len(indexes) > 0 {
b := tx.Bucket([]byte(indexBucket))
if b != nil {
for _, index := range indexes {
if err := b.Delete([]byte(indexKey(index, id))); err != nil {
return err
}
}
}
}
return main.DeleteBucket([]byte(id))
})
}
func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error {
return s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(mainBucket))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
}
return fn(b)
})
}
func (s *Store) Get(id string) (*StorageItem, bool) {
empty := func() *StorageItem {
si, _ := newStorageItem(id, nil, s)
return si
}
tx, err := s.db.Begin(false)
if err != nil {
return empty(), false
}
defer tx.Rollback()
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return empty(), false
}
b = b.Bucket([]byte(id))
if b == nil {
return empty(), false
}
si, _ := newStorageItem(id, b, s)
return si, true
}
func (s *Store) Close() error {
return s.db.Close()
}
type StorageItem struct {
id string
values map[string]*Value
queue []func(*bolt.Bucket) error
storage *Store
mu sync.RWMutex
}
func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) {
si := &StorageItem{
id: id,
storage: s,
values: make(map[string]*Value),
}
if b != nil {
if err := b.ForEach(func(k, v []byte) error {
var sv Value
if len(v) > 0 {
if err := json.Unmarshal(v, &sv); err != nil {
return err
}
si.values[string(k)] = &sv
}
return nil
}); err != nil {
return si, err
}
}
return si, nil
}
func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this?
return s.storage
}
func (s *StorageItem) ID() string {
return s.id
}
func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error {
return s.storage.View(s.id, fn)
}
func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error {
return s.storage.Update(s.id, fn)
}
func (s *StorageItem) Keys() []string {
keys := make([]string, 0, len(s.values))
for k := range s.values {
keys = append(keys, k)
}
return keys
}
func (s *StorageItem) Get(k string) *Value {
s.mu.RLock()
v := s.values[k]
s.mu.RUnlock()
return v
}
func (s *StorageItem) GetExternal(k string) ([]byte, error) {
var dt []byte
err := s.storage.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(externalBucket))
if b == nil {
return errors.WithStack(errNotFound)
}
b = b.Bucket([]byte(s.id))
if b == nil {
return errors.WithStack(errNotFound)
}
dt = b.Get([]byte(k))
if dt == nil {
return errors.WithStack(errNotFound)
}
return nil
})
if err != nil {
return nil, err
}
return dt, nil
}
func (s *StorageItem) SetExternal(k string, dt []byte) error {
return s.storage.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(externalBucket))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(s.id))
if err != nil {
return err
}
return b.Put([]byte(k), dt)
})
}
func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
s.mu.Lock()
defer s.mu.Unlock()
s.queue = append(s.queue, fn)
}
func (s *StorageItem) Commit() error {
s.mu.Lock()
defer s.mu.Unlock()
return s.Update(func(b *bolt.Bucket) error {
for _, fn := range s.queue {
if err := fn(b); err != nil {
return err
}
}
s.queue = s.queue[:0]
return nil
})
}
func (s *StorageItem) Indexes() (out []string) {
for _, v := range s.values {
if v.Index != "" {
out = append(out, v.Index)
}
}
return
}
func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
if v == nil {
if err := b.Put([]byte(key), nil); err != nil {
return err
}
delete(s.values, key)
return nil
}
dt, err := json.Marshal(v)
if err != nil {
return err
}
if err := b.Put([]byte(key), dt); err != nil {
return err
}
if v.Index != "" {
b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
if err != nil {
return err
}
if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil {
return err
}
}
s.values[key] = v
return nil
}
type Value struct {
Value json.RawMessage `json:"value,omitempty"`
Index string `json:"index,omitempty"`
}
func NewValue(v interface{}) (*Value, error) {
dt, err := json.Marshal(v)
if err != nil {
return nil, err
}
return &Value{Value: json.RawMessage(dt)}, nil
}
func (v *Value) Unmarshal(target interface{}) error {
err := json.Unmarshal(v.Value, target)
return err
}
func indexKey(index, target string) string {
return index + "::" + target
}
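End to end, writes are batched with Queue and flushed by Commit in a single bolt update; a sketch of storing and reading back one value (the path, ID, and key are illustrative):

func exampleStoreValue() (string, error) {
	store, err := NewStore("/tmp/example-metadata.db") // hypothetical path
	if err != nil {
		return "", err
	}
	defer store.Close()
	si, _ := store.Get("example-id") // returns an empty item for a new ID
	v, err := NewValue("hello")
	if err != nil {
		return "", err
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, "example.key", v)
	})
	if err := si.Commit(); err != nil { // flushes the queued write
		return "", err
	}
	var s string
	if err := si.Get("example.key").Unmarshal(&s); err != nil {
		return "", err
	}
	return s, nil
}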

vendor/github.com/moby/buildkit/cache/refs.go generated vendored Normal file

@ -0,0 +1,380 @@
package cache
import (
"context"
"sync"
"github.com/containerd/containerd/mount"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Ref is a reference to cacheable objects.
type Ref interface {
Mountable
ID() string
Release(context.Context) error
Size(ctx context.Context) (int64, error)
Metadata() *metadata.StorageItem
}
type ImmutableRef interface {
Ref
Parent() ImmutableRef
Finalize(ctx context.Context) error // Make sure reference is flushed to driver
Clone() ImmutableRef
}
type MutableRef interface {
Ref
Commit(context.Context) (ImmutableRef, error)
}
type Mountable interface {
Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error)
}
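The intended lifecycle of these interfaces: allocate a MutableRef, populate it through its mount, then Commit freezes it into an ImmutableRef that can be mounted read-only and released (a sketch; the description is illustrative):

func exampleLifecycle(ctx context.Context, cm Manager) error {
	mref, err := cm.New(ctx, nil, WithDescription("scratch layer"))
	if err != nil {
		return err
	}
	// ... mount mref and write the layer contents ...
	iref, err := mref.Commit(ctx)
	if err != nil {
		return err
	}
	defer iref.Release(context.TODO())
	_, err = iref.Mount(ctx, true) // read-only view of the committed snapshot
	return err
}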
type cacheRecord struct {
cm *cacheManager
mu *sync.Mutex // the mutex is shared by records sharing data
mutable bool
refs map[Mountable]struct{}
parent ImmutableRef
md *metadata.StorageItem
// dead means record is marked as deleted
dead bool
view string
viewMount snapshot.Mountable
sizeG flightcontrol.Group
// these are filled if multiple refs point to same data
equalMutable *mutableRef
equalImmutable *immutableRef
}
// hold ref lock before calling
func (cr *cacheRecord) ref() *immutableRef {
ref := &immutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
// hold ref lock before calling
func (cr *cacheRecord) mref() *mutableRef {
ref := &mutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
// hold ref lock before calling
func (cr *cacheRecord) isDead() bool {
return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead)
}
func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
// this expects that usage() is implemented lazily
s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
cr.mu.Lock()
s := getSize(cr.md)
if s != sizeUnknown {
cr.mu.Unlock()
return s, nil
}
driverID := cr.ID()
if cr.equalMutable != nil {
driverID = cr.equalMutable.ID()
}
cr.mu.Unlock()
usage, err := cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID)
if err != nil {
cr.mu.Lock()
isDead := cr.isDead()
cr.mu.Unlock()
if isDead {
return int64(0), nil
}
return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID())
}
cr.mu.Lock()
setSize(cr.md, usage.Size)
if err := cr.md.Commit(); err != nil {
cr.mu.Unlock()
return s, err
}
cr.mu.Unlock()
return usage.Size, nil
})
return s.(int64), err
}
func (cr *cacheRecord) Parent() ImmutableRef {
if cr.parent == nil {
return nil
}
p := cr.parent.(*immutableRef)
p.mu.Lock()
defer p.mu.Unlock()
return p.ref()
}
func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.mutable {
m, err := cr.cm.Snapshotter.Mounts(ctx, cr.ID())
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
}
if readonly {
m = setReadonly(m)
}
return m, nil
}
if cr.equalMutable != nil && readonly {
m, err := cr.cm.Snapshotter.Mounts(ctx, cr.equalMutable.ID())
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID())
}
return setReadonly(m), nil
}
if err := cr.finalize(ctx); err != nil {
return nil, err
}
if cr.viewMount == nil { // TODO: handle this better
cr.view = identity.NewID()
m, err := cr.cm.Snapshotter.View(ctx, cr.view, cr.ID())
if err != nil {
cr.view = ""
return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
}
cr.viewMount = m
}
return cr.viewMount, nil
}
// call when holding the manager lock
func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
delete(cr.cm.records, cr.ID())
if cr.parent != nil {
if err := cr.parent.(*immutableRef).release(ctx); err != nil {
return err
}
}
if removeSnapshot {
if err := cr.cm.Snapshotter.Remove(ctx, cr.ID()); err != nil {
return err
}
}
if err := cr.cm.md.Clear(cr.ID()); err != nil {
return err
}
return nil
}
func (cr *cacheRecord) ID() string {
return cr.md.ID()
}
type immutableRef struct {
*cacheRecord
}
type mutableRef struct {
*cacheRecord
}
func (sr *immutableRef) Clone() ImmutableRef {
sr.mu.Lock()
ref := sr.ref()
sr.mu.Unlock()
return ref
}
func (sr *immutableRef) Release(ctx context.Context) error {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.release(ctx)
}
func (sr *immutableRef) release(ctx context.Context) error {
delete(sr.refs, sr)
if len(sr.refs) == 0 {
updateLastUsed(sr.md)
if sr.viewMount != nil { // TODO: release viewMount earlier if possible
if err := sr.cm.Snapshotter.Remove(ctx, sr.view); err != nil {
return err
}
sr.view = ""
sr.viewMount = nil
}
if sr.equalMutable != nil {
sr.equalMutable.release(ctx)
}
// go sr.cm.GC()
}
return nil
}
func (sr *immutableRef) Finalize(ctx context.Context) error {
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.finalize(ctx)
}
func (cr *cacheRecord) Metadata() *metadata.StorageItem {
return cr.md
}
func (cr *cacheRecord) finalize(ctx context.Context) error {
mutable := cr.equalMutable
if mutable == nil {
return nil
}
err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
if err != nil {
return errors.Wrapf(err, "failed to commit %s", mutable.ID())
}
mutable.dead = true
go func() {
cr.cm.mu.Lock()
defer cr.cm.mu.Unlock()
if err := mutable.remove(context.TODO(), false); err != nil {
logrus.Error(err)
}
}()
cr.equalMutable = nil
clearEqualMutable(cr.md)
return cr.md.Commit()
}
func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) {
if !sr.mutable || len(sr.refs) == 0 {
return nil, errors.Wrapf(errInvalid, "invalid mutable ref")
}
id := identity.NewID()
md, _ := sr.cm.md.Get(id)
rec := &cacheRecord{
mu: sr.mu,
cm: sr.cm,
parent: sr.Parent(),
equalMutable: sr,
refs: make(map[Mountable]struct{}),
md: md,
}
if descr := GetDescription(sr.md); descr != "" {
if err := queueDescription(md, descr); err != nil {
return nil, err
}
}
if err := initializeMetadata(rec); err != nil {
return nil, err
}
sr.cm.records[id] = rec
if err := sr.md.Commit(); err != nil {
return nil, err
}
setSize(md, sizeUnknown)
setEqualMutable(md, sr.ID())
if err := md.Commit(); err != nil {
return nil, err
}
ref := rec.ref()
sr.equalImmutable = ref
return ref, nil
}
func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.commit(ctx)
}
func (sr *mutableRef) Release(ctx context.Context) error {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.release(ctx)
}
func (sr *mutableRef) release(ctx context.Context) error {
delete(sr.refs, sr)
if getCachePolicy(sr.md) != cachePolicyRetain {
if sr.equalImmutable != nil {
if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain {
return nil
}
if err := sr.equalImmutable.remove(ctx, false); err != nil {
return err
}
}
if sr.parent != nil {
if err := sr.parent.(*immutableRef).release(ctx); err != nil {
return err
}
}
return sr.remove(ctx, true)
} else {
updateLastUsed(sr.md)
}
return nil
}
func setReadonly(mounts snapshot.Mountable) snapshot.Mountable {
return &readOnlyMounter{mounts}
}
type readOnlyMounter struct {
snapshot.Mountable
}
func (m *readOnlyMounter) Mount() ([]mount.Mount, error) {
mounts, err := m.Mountable.Mount()
if err != nil {
return nil, err
}
for i, m := range mounts {
opts := make([]string, 0, len(m.Options))
for _, opt := range m.Options {
if opt != "rw" {
opts = append(opts, opt)
}
}
opts = append(opts, "ro")
mounts[i].Options = opts
}
return mounts, nil
}


@ -0,0 +1,141 @@
package remotecache
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/docker/distribution/manifest"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/push"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ExporterOpt struct {
SessionManager *session.Manager
}
func NewCacheExporter(opt ExporterOpt) *CacheExporter {
return &CacheExporter{opt: opt}
}
type CacheExporter struct {
opt ExporterOpt
}
func (ce *CacheExporter) ExporterForTarget(target string) *RegistryCacheExporter {
cc := v1.NewCacheChains()
return &RegistryCacheExporter{target: target, CacheExporterTarget: cc, chains: cc, exporter: ce}
}
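A per-target exporter is handed to the solver as its CacheExporterTarget and flushed once the build finishes (a sketch; the registry reference is hypothetical):

func exampleExport(ctx context.Context, ce *CacheExporter) error {
	exp := ce.ExporterForTarget("registry.example.com/myimage:buildcache")
	// ... the solver records cache chains into exp during the build ...
	return exp.Finalize(ctx)
}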
func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, target string) error {
config, descs, err := cc.Marshal()
if err != nil {
return err
}
// own type because oci type can't be pushed and docker type doesn't have annotations
type manifestList struct {
manifest.Versioned
// Manifests references platform specific manifests.
Manifests []ocispec.Descriptor `json:"manifests"`
}
var mfst manifestList
mfst.SchemaVersion = 2
mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
allBlobs := map[digest.Digest]struct{}{}
mp := contentutil.NewMultiProvider(nil)
for _, l := range config.Layers {
if _, ok := allBlobs[l.Blob]; ok {
continue
}
dgstPair, ok := descs[l.Blob]
if !ok {
return errors.Errorf("missing blob %s", l.Blob)
}
allBlobs[l.Blob] = struct{}{}
mp.Add(l.Blob, dgstPair.Provider)
mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
}
dt, err := json.Marshal(config)
if err != nil {
return err
}
dgst := digest.FromBytes(dt)
desc := ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
MediaType: v1.CacheConfigMediaTypeV0,
}
configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
buf := contentutil.NewBuffer()
if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return configDone(errors.Wrap(err, "error writing config blob"))
}
configDone(nil)
mp.Add(dgst, buf)
mfst.Manifests = append(mfst.Manifests, desc)
dt, err = json.Marshal(mfst)
if err != nil {
return errors.Wrap(err, "failed to marshal manifest")
}
dgst = digest.FromBytes(dt)
buf = contentutil.NewBuffer()
desc = ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
}
mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return mfstDone(errors.Wrap(err, "error writing manifest blob"))
}
mfstDone(nil)
mp.Add(dgst, buf)
return push.Push(ctx, ce.opt.SessionManager, mp, dgst, target, false)
}
type RegistryCacheExporter struct {
solver.CacheExporterTarget
chains *v1.CacheChains
target string
exporter *CacheExporter
}
func (ce *RegistryCacheExporter) Finalize(ctx context.Context) error {
return ce.exporter.Finalize(ctx, ce.chains, ce.target)
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}


@ -0,0 +1,124 @@
package remotecache
import (
"context"
"encoding/json"
"net/http"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/worker"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ImportOpt struct {
SessionManager *session.Manager
Worker worker.Worker // TODO: remove. This sets the worker where the cache is imported to. Should be passed on load instead.
}
func NewCacheImporter(opt ImportOpt) *CacheImporter {
return &CacheImporter{opt: opt}
}
type CacheImporter struct {
opt ImportOpt
}
func (ci *CacheImporter) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
id := session.FromContext(ctx)
if id == "" {
return nil
}
return func(host string) (string, string, error) {
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
caller, err := ci.opt.SessionManager.Get(timeoutCtx, id)
if err != nil {
return "", "", err
}
return auth.CredentialsFunc(context.TODO(), caller)(host)
}
}
func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheManager, error) {
resolver := docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
Credentials: ci.getCredentialsFromSession(ctx),
})
ref, desc, err := resolver.Resolve(ctx, ref)
if err != nil {
return nil, err
}
fetcher, err := resolver.Fetcher(ctx, ref)
if err != nil {
return nil, err
}
b := contentutil.NewBuffer()
if _, err := remotes.FetchHandler(b, fetcher)(ctx, desc); err != nil {
return nil, err
}
dt, err := content.ReadBlob(ctx, b, desc)
if err != nil {
return nil, err
}
var mfst ocispec.Index
if err := json.Unmarshal(dt, &mfst); err != nil {
return nil, err
}
allLayers := v1.DescriptorProvider{}
var configDesc ocispec.Descriptor
for _, m := range mfst.Manifests {
if m.MediaType == v1.CacheConfigMediaTypeV0 {
configDesc = m
continue
}
allLayers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m,
Provider: contentutil.FromFetcher(fetcher, m),
}
}
if configDesc.Digest == "" {
return nil, errors.Errorf("invalid build cache from %s", ref)
}
if _, err := remotes.FetchHandler(b, fetcher)(ctx, configDesc); err != nil {
return nil, err
}
dt, err = content.ReadBlob(ctx, b, configDesc)
if err != nil {
return nil, err
}
cc := v1.NewCacheChains()
if err := v1.Parse(dt, allLayers, cc); err != nil {
return nil, err
}
keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, ci.opt.Worker)
if err != nil {
return nil, err
}
return solver.NewCacheManager(ref, keysStorage, resultStorage), nil
}
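Resolve turns a registry reference into a solver.CacheManager that the solver consults as an additional cache source (a sketch; the session manager, worker, and reference are assumed inputs):

func exampleImport(ctx context.Context, sm *session.Manager, w worker.Worker) (solver.CacheManager, error) {
	ci := NewCacheImporter(ImportOpt{
		SessionManager: sm,
		Worker:         w,
	})
	return ci.Resolve(ctx, "registry.example.com/myimage:buildcache")
}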


@ -0,0 +1,247 @@
package cacheimport
import (
"context"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) {
storage := &cacheKeyStorage{
byID: map[string]*itemWithOutgoingLinks{},
byItem: map[*item]string{},
byResult: map[string]map[string]struct{}{},
}
for _, it := range cc.items {
if _, err := addItemToStorage(storage, it); err != nil {
return nil, nil, err
}
}
results := &cacheResultStorage{
w: w,
byID: storage.byID,
byResult: storage.byResult,
}
return storage, results, nil
}
func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) {
if id, ok := k.byItem[it]; ok {
if id == "" {
return nil, errors.Errorf("invalid loop")
}
return k.byID[id], nil
}
var id string
if len(it.links) == 0 {
id = it.dgst.String()
} else {
id = identity.NewID()
}
k.byItem[it] = ""
for i, m := range it.links {
for l := range m {
src, err := addItemToStorage(k, l.src)
if err != nil {
return nil, err
}
cl := nlink{
input: i,
dgst: it.dgst,
selector: l.selector,
}
src.links[cl] = append(src.links[cl], id)
}
}
k.byItem[it] = id
itl := &itemWithOutgoingLinks{
item: it,
links: map[nlink][]string{},
}
k.byID[id] = itl
if res := it.result; res != nil {
resultID := remoteID(res)
ids, ok := k.byResult[resultID]
if !ok {
ids = map[string]struct{}{}
k.byResult[resultID] = ids
}
ids[id] = struct{}{}
}
return itl, nil
}
type cacheKeyStorage struct {
byID map[string]*itemWithOutgoingLinks
byItem map[*item]string
byResult map[string]map[string]struct{}
}
type itemWithOutgoingLinks struct {
*item
links map[nlink][]string
}
func (cs *cacheKeyStorage) Exists(id string) bool {
_, ok := cs.byID[id]
return ok
}
func (cs *cacheKeyStorage) Walk(func(id string) error) error {
return nil
}
func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
if res := it.result; res != nil {
return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime})
}
return nil
}
func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) {
it, ok := cs.byID[id]
if !ok {
return solver.CacheResult{}, nil
}
if res := it.result; res != nil {
return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil
}
return solver.CacheResult{}, nil
}
func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error {
return nil
}
func (cs *cacheKeyStorage) Release(resultID string) error {
return nil
}
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
return nil
}
func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
for _, id := range it.links[nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}] {
if err := fn(id); err != nil {
return err
}
}
return nil
}
// TODO:
func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
return nil
}
func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error {
ids := cs.byResult[id]
for id := range ids {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool {
l := nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}
if it, ok := cs.byID[id]; ok {
for _, id := range it.links[l] {
if id == target {
return true
}
}
}
return false
}
type cacheResultStorage struct {
w worker.Worker
byID map[string]*itemWithOutgoingLinks
byResult map[string]map[string]struct{}
}
func (cs *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) {
return solver.CacheResult{}, errors.Errorf("importer is immutable")
}
func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
remote, err := cs.LoadRemote(ctx, res)
if err != nil {
return nil, err
}
ref, err := cs.w.FromRemote(ctx, remote)
if err != nil {
return nil, err
}
return worker.NewWorkerRefResult(ref, cs.w), nil
}
func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
if r := cs.byResultID(res.ID); r != nil {
return r, nil
}
return nil, errors.WithStack(solver.ErrNotFound)
}
func (cs *cacheResultStorage) Exists(id string) bool {
return cs.byResultID(id) != nil
}
func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote {
m, ok := cs.byResult[resultID]
if !ok || len(m) == 0 {
return nil
}
for id := range m {
it, ok := cs.byID[id]
if ok {
if r := it.result; r != nil {
return r
}
}
}
return nil
}
// remoteID returns a unique ID per remote; the ID is not stable across runs.
func remoteID(r *solver.Remote) string {
dgstr := digest.Canonical.Digester()
for _, desc := range r.Descriptors {
dgstr.Hash().Write([]byte(desc.Digest))
}
return dgstr.Digest().String()
}

View file

@ -0,0 +1,127 @@
package cacheimport
import (
"time"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func NewCacheChains() *CacheChains {
return &CacheChains{visited: map[interface{}]struct{}{}}
}
type CacheChains struct {
items []*item
visited map[interface{}]struct{}
}
func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
it := &item{c: c, dgst: dgst}
c.items = append(c.items, it)
return it
}
func (c *CacheChains) Visit(v interface{}) {
c.visited[v] = struct{}{}
}
func (c *CacheChains) Visited(v interface{}) bool {
_, ok := c.visited[v]
return ok
}
func (c *CacheChains) normalize() error {
st := &normalizeState{
added: map[*item]*item{},
links: map[*item]map[nlink]map[digest.Digest]struct{}{},
byKey: map[digest.Digest]*item{},
}
for _, it := range c.items {
_, err := normalizeItem(it, st)
if err != nil {
return err
}
}
items := make([]*item, 0, len(st.byKey))
for _, it := range st.byKey {
items = append(items, it)
}
c.items = items
return nil
}
func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) {
if err := c.normalize(); err != nil {
return nil, nil, err
}
st := &marshalState{
chainsByID: map[string]int{},
descriptors: DescriptorProvider{},
recordsByItem: map[*item]int{},
}
for _, it := range c.items {
if err := marshalItem(it, st); err != nil {
return nil, nil, err
}
}
cc := CacheConfig{
Layers: st.layers,
Records: st.records,
}
sortConfig(&cc)
return &cc, st.descriptors, nil
}
type DescriptorProvider map[digest.Digest]DescriptorProviderPair
type DescriptorProviderPair struct {
Descriptor ocispec.Descriptor
Provider content.Provider
}
type item struct {
c *CacheChains
dgst digest.Digest
result *solver.Remote
resultTime time.Time
links []map[link]struct{}
}
type link struct {
src *item
selector string
}
func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
c.resultTime = createdAt
c.result = result
}
func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
src, ok := rec.(*item)
if !ok {
return
}
for index >= len(c.links) {
c.links = append(c.links, map[link]struct{}{})
}
c.links[index][link{src: src, selector: selector}] = struct{}{}
}
var _ solver.CacheExporterTarget = &CacheChains{}

View file

@ -0,0 +1,50 @@
package cacheimport
// Distributable build cache
//
// Main manifest is OCI image index
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
// Manifests array contains descriptors to the cache layers and one instance of
// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
// The cache layer descriptors need to have an annotation with the uncompressed digest
// to allow deduplication on extraction and optionally "buildkit/createdat"
// annotation to support maintaining original timestamps.
//
// Cache config file layout:
//
//{
// "layers": [
// {
// "blob": "sha256:deadbeef", <- digest of layer blob in index
// "parent": -1 <- index of parent layer, -1 if no parent
// },
// {
// "blob": "sha256:deadbeef",
// "parent": 0
// }
// ],
//
// "records": [
// {
// "digest": "sha256:deadbeef", <- base digest for the record
// },
// {
// "digest": "sha256:deadbeef",
// "output": 1, <- optional output index
// "layers": [ <- optional array or layer chains
// {
// "createdat": "",
// "layer": 1, <- index to the layer
// }
// ],
// "inputs": [ <- dependant records
// [ <- index of the dependency (0)
// {
// "selector": "sel", <- optional selector
// "link": 0, <- index to the dependant record
// }
// ]
// ]
// }
// ]
// }
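//
// A hedged sketch added for illustration (not part of the original file):
// the layout above maps directly onto the CacheConfig, CacheLayer and
// CacheRecord types defined in this package, so a minimal two-layer config
// can be built and encoded with encoding/json (digests are placeholders):
//
// cfg := CacheConfig{
// Layers: []CacheLayer{
// {Blob: "sha256:aaaa", ParentIndex: -1},
// {Blob: "sha256:bbbb", ParentIndex: 0},
// },
// Records: []CacheRecord{
// {Digest: "sha256:cccc", Results: []CacheResult{{LayerIndex: 1}}},
// },
// }
// dt, err := json.Marshal(cfg) // yields the JSON structure documented above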

View file

@ -0,0 +1,102 @@
package cacheimport
import (
"encoding/json"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
var config CacheConfig
if err := json.Unmarshal(configJSON, &config); err != nil {
return err
}
cache := map[int]solver.CacheExporterRecord{}
for i := range config.Records {
if _, err := parseRecord(config, i, provider, t, cache); err != nil {
return err
}
}
return nil
}
func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) {
if r, ok := cache[idx]; ok {
if r == nil {
return nil, errors.Errorf("invalid looping record")
}
return r, nil
}
if idx < 0 || idx >= len(cc.Records) {
return nil, errors.Errorf("invalid record ID: %d", idx)
}
rec := cc.Records[idx]
r := t.Add(rec.Digest)
cache[idx] = nil
for i, inputs := range rec.Inputs {
for _, inp := range inputs {
src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache)
if err != nil {
return nil, err
}
r.LinkFrom(src, i, inp.Selector)
}
}
for _, res := range rec.Results {
visited := map[int]struct{}{}
remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited)
if err != nil {
return nil, err
}
r.AddResult(res.CreatedAt, remote)
}
cache[idx] = r
return r, nil
}
func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) {
if _, ok := visited[idx]; ok {
return nil, errors.Errorf("invalid looping layer")
}
visited[idx] = struct{}{}
if idx < 0 || idx >= len(layers) {
return nil, errors.Errorf("invalid layer index %d", idx)
}
l := layers[idx]
descPair, ok := provider[l.Blob]
if !ok {
return nil, errors.Errorf("missing blob for %s", l.Blob)
}
var r *solver.Remote
if l.ParentIndex != -1 {
var err error
r, err = getRemoteChain(layers, l.ParentIndex, provider, visited)
if err != nil {
return nil, err
}
r.Descriptors = append(r.Descriptors, descPair.Descriptor)
mp := contentutil.NewMultiProvider(r.Provider)
mp.Add(descPair.Descriptor.Digest, descPair.Provider)
r.Provider = mp
return r, nil
}
return &solver.Remote{
Descriptors: []ocispec.Descriptor{descPair.Descriptor},
Provider: descPair.Provider,
}, nil
}

View file

@ -0,0 +1,35 @@
package cacheimport
import (
"time"
digest "github.com/opencontainers/go-digest"
)
// CacheConfigMediaTypeV0 is the media type of the build cache config blob in the image index.
const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0"
type CacheConfig struct {
Layers []CacheLayer `json:"layers,omitempty"`
Records []CacheRecord `json:"records,omitempty"`
}
type CacheLayer struct {
Blob digest.Digest `json:"blob,omitempty"`
ParentIndex int `json:"parent,omitempty"`
}
type CacheRecord struct {
Results []CacheResult `json:"layers,omitempty"`
Digest digest.Digest `json:"digest,omitempty"`
Inputs [][]CacheInput `json:"inputs,omitempty"`
}
type CacheResult struct {
LayerIndex int `json:"layer"`
CreatedAt time.Time `json:"createdAt,omitempty"`
}
type CacheInput struct {
Selector string `json:"selector,omitempty"`
LinkIndex int `json:"link"`
}

View file

@ -0,0 +1,306 @@
package cacheimport
import (
"fmt"
"sort"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// sortConfig sorts the config structure to make sure it is deterministic
func sortConfig(cc *CacheConfig) {
type indexedLayer struct {
oldIndex int
newIndex int
l CacheLayer
}
unsortedLayers := make([]*indexedLayer, len(cc.Layers))
sortedLayers := make([]*indexedLayer, len(cc.Layers))
for i, l := range cc.Layers {
il := &indexedLayer{oldIndex: i, l: l}
unsortedLayers[i] = il
sortedLayers[i] = il
}
sort.Slice(sortedLayers, func(i, j int) bool {
li := sortedLayers[i].l
lj := sortedLayers[j].l
if li.Blob == lj.Blob {
return li.ParentIndex < lj.ParentIndex
}
return li.Blob < lj.Blob
})
for i, l := range sortedLayers {
l.newIndex = i
}
layers := make([]CacheLayer, len(sortedLayers))
for i, l := range sortedLayers {
if pID := l.l.ParentIndex; pID != -1 {
l.l.ParentIndex = unsortedLayers[pID].newIndex
}
layers[i] = l.l
}
type indexedRecord struct {
oldIndex int
newIndex int
r CacheRecord
}
unsortedRecords := make([]*indexedRecord, len(cc.Records))
sortedRecords := make([]*indexedRecord, len(cc.Records))
for i, r := range cc.Records {
ir := &indexedRecord{oldIndex: i, r: r}
unsortedRecords[i] = ir
sortedRecords[i] = ir
}
sort.Slice(sortedRecords, func(i, j int) bool {
ri := sortedRecords[i].r
rj := sortedRecords[j].r
if ri.Digest != rj.Digest {
return ri.Digest < rj.Digest
}
if len(ri.Inputs) != len(rj.Inputs) {
return len(ri.Inputs) < len(rj.Inputs)
}
for i, inputs := range ri.Inputs {
if len(ri.Inputs[i]) != len(rj.Inputs[i]) {
return len(ri.Inputs[i]) < len(rj.Inputs[i])
}
for j := range inputs {
if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
}
return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest
}
}
return ri.Digest < rj.Digest
})
for i, l := range sortedRecords {
l.newIndex = i
}
records := make([]CacheRecord, len(sortedRecords))
for i, r := range sortedRecords {
for j := range r.r.Results {
r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex
}
for j, inputs := range r.r.Inputs {
for k := range inputs {
r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex
}
sort.Slice(inputs, func(i, j int) bool {
return inputs[i].LinkIndex < inputs[j].LinkIndex
})
}
records[i] = r.r
}
cc.Layers = layers
cc.Records = records
}
func outputKey(dgst digest.Digest, idx int) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
}
type nlink struct {
dgst digest.Digest
input int
selector string
}
type normalizeState struct {
added map[*item]*item
links map[*item]map[nlink]map[digest.Digest]struct{}
byKey map[digest.Digest]*item
next int
}
func normalizeItem(it *item, state *normalizeState) (*item, error) {
if it2, ok := state.added[it]; ok {
return it2, nil
}
if len(it.links) == 0 {
id := it.dgst
if it2, ok := state.byKey[id]; ok {
state.added[it] = it2
return it2, nil
}
state.byKey[id] = it
state.added[it] = it
return nil, nil
}
matches := map[digest.Digest]struct{}{}
// check if there is already a matching record
for i, m := range it.links {
if len(m) == 0 {
return nil, errors.Errorf("invalid incomplete links")
}
for l := range m {
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
it2, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
links := state.links[it2][nl]
if i == 0 {
for id := range links {
matches[id] = struct{}{}
}
} else {
for id := range matches {
if _, ok := links[id]; !ok {
delete(matches, id)
}
}
}
}
}
var id digest.Digest
links := it.links
if len(matches) > 0 {
for m := range matches {
if id == "" || id > m {
id = m
}
}
} else {
// keep tmp IDs deterministic
state.next++
id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next)))
state.byKey[id] = it
it.links = make([]map[link]struct{}, len(it.links))
for i := range it.links {
it.links[i] = map[link]struct{}{}
}
}
it2 := state.byKey[id]
state.added[it] = it2
for i, m := range links {
for l := range m {
subIt, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{}
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
if _, ok := state.links[subIt]; !ok {
state.links[subIt] = map[nlink]map[digest.Digest]struct{}{}
}
if _, ok := state.links[subIt][nl]; !ok {
state.links[subIt][nl] = map[digest.Digest]struct{}{}
}
state.links[subIt][nl][id] = struct{}{}
}
}
return it2, nil
}
type marshalState struct {
layers []CacheLayer
chainsByID map[string]int
descriptors DescriptorProvider
records []CacheRecord
recordsByItem map[*item]int
}
func marshalRemote(r *solver.Remote, state *marshalState) string {
if len(r.Descriptors) == 0 {
return ""
}
type Remote struct {
Descriptors []ocispec.Descriptor
Provider content.Provider
}
var parentID string
if len(r.Descriptors) > 1 {
r2 := &solver.Remote{
Descriptors: r.Descriptors[:len(r.Descriptors)-1],
Provider: r.Provider,
}
parentID = marshalRemote(r2, state)
}
desc := r.Descriptors[len(r.Descriptors)-1]
state.descriptors[desc.Digest] = DescriptorProviderPair{
Descriptor: desc,
Provider: r.Provider,
}
id := desc.Digest.String() + parentID
if _, ok := state.chainsByID[id]; ok {
return id
}
state.chainsByID[id] = len(state.layers)
l := CacheLayer{
Blob: desc.Digest,
ParentIndex: -1,
}
if parentID != "" {
l.ParentIndex = state.chainsByID[parentID]
}
state.layers = append(state.layers, l)
return id
}
func marshalItem(it *item, state *marshalState) error {
if _, ok := state.recordsByItem[it]; ok {
return nil
}
rec := CacheRecord{
Digest: it.dgst,
Inputs: make([][]CacheInput, len(it.links)),
}
for i, m := range it.links {
for l := range m {
if err := marshalItem(l.src, state); err != nil {
return err
}
idx, ok := state.recordsByItem[l.src]
if !ok {
return errors.Errorf("invalid source record: %v", l.src)
}
rec.Inputs[i] = append(rec.Inputs[i], CacheInput{
Selector: l.selector,
LinkIndex: idx,
})
}
}
if it.result != nil {
id := marshalRemote(it.result, state)
if id != "" {
idx, ok := state.chainsByID[id]
if !ok {
return errors.Errorf("parent chainid not found")
}
rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime})
}
}
state.recordsByItem[it] = len(state.records)
state.records = append(state.records, rec)
return nil
}

vendor/github.com/moby/buildkit/client/client.go generated vendored Normal file
View file

@ -0,0 +1,136 @@
package client
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/util/appdefaults"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type Client struct {
conn *grpc.ClientConn
}
type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithDialer(dialer),
grpc.FailOnNonTempDialError(true),
}
needWithInsecure := true
for _, o := range opts {
if _, ok := o.(*withBlockOpt); ok {
gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
opt, err := loadCredentials(credInfo)
if err != nil {
return nil, err
}
gopts = append(gopts, opt)
needWithInsecure = false
}
if wt, ok := o.(*withTracer); ok {
gopts = append(gopts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
}
}
if needWithInsecure {
gopts = append(gopts, grpc.WithInsecure())
}
if address == "" {
address = appdefaults.Address
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
}
c := &Client{
conn: conn,
}
return c, nil
}
func (c *Client) controlClient() controlapi.ControlClient {
return controlapi.NewControlClient(c.conn)
}
func (c *Client) Close() error {
return c.conn.Close()
}
type withBlockOpt struct{}
func WithBlock() ClientOpt {
return &withBlockOpt{}
}
type withCredentials struct {
ServerName string
CACert string
Cert string
Key string
}
// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
return &withCredentials{serverName, ca, cert, key}
}
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
ca, err := ioutil.ReadFile(opts.CACert)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(ca); !ok {
return nil, errors.New("failed to append ca certs")
}
cfg := &tls.Config{
ServerName: opts.ServerName,
RootCAs: certPool,
}
// produce an error if the user specified only one of cert and key; both are required together
if opts.Cert != "" || opts.Key != "" {
cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
if err != nil {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
func WithTracer(t opentracing.Tracer) ClientOpt {
return &withTracer{t}
}
type withTracer struct {
tracer opentracing.Tracer
}
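// Editor's sketch (not part of the vendored source): typical construction of
// a client against a local buildkitd. The socket address and certificate
// paths below are illustrative assumptions.
//
// c, err := New("unix:///run/buildkit/buildkitd.sock", WithBlock(),
// WithCredentials("example.com", "/certs/ca.pem", "/certs/cert.pem", "/certs/key.pem"))
// if err != nil {
// // handle dial/TLS error
// }
// defer c.Close()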

vendor/github.com/moby/buildkit/client/client_unix.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// +build !windows
package client
import (
"net"
"strings"
"time"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}

View file

@ -0,0 +1,24 @@
package client
import (
"net"
"strings"
"time"
"github.com/Microsoft/go-winio"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
switch addrParts[0] {
case "npipe":
address = strings.Replace(addrParts[1], "/", "\\", -1) // convert all path separators for npipe
return winio.DialPipe(address, &timeout)
default:
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}
}

vendor/github.com/moby/buildkit/client/diskusage.go generated vendored Normal file
View file

@ -0,0 +1,73 @@
package client
import (
"context"
"sort"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type UsageInfo struct {
ID string
Mutable bool
InUse bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parent string
Description string
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
info := &DiskUsageInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.DiskUsageRequest{Filter: info.Filter}
resp, err := c.controlClient().DiskUsage(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to call diskusage")
}
var du []*UsageInfo
for _, d := range resp.Record {
du = append(du, &UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
})
}
sort.Slice(du, func(i, j int) bool {
if du[i].Size == du[j].Size {
return du[i].ID > du[j].ID
}
return du[i].Size > du[j].Size
})
return du, nil
}
type DiskUsageOption func(*DiskUsageInfo)
type DiskUsageInfo struct {
Filter string
}
func WithFilter(f string) DiskUsageOption {
return func(di *DiskUsageInfo) {
di.Filter = f
}
}
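// Editor's sketch (assumed usage; c, ctx and the filter expression are
// illustrative): records come back sorted largest-first, so this prints the
// biggest cache entries at the top.
//
// du, err := c.DiskUsage(ctx, WithFilter("type==source.local"))
// if err == nil {
// for _, d := range du {
// fmt.Printf("%s %d inuse=%v\n", d.ID, d.Size, d.InUse)
// }
// }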

vendor/github.com/moby/buildkit/client/exporters.go generated vendored Normal file
View file

@ -0,0 +1,8 @@
package client
const (
ExporterImage = "image"
ExporterLocal = "local"
ExporterOCI = "oci"
ExporterDocker = "docker"
)

vendor/github.com/moby/buildkit/client/graph.go generated vendored Normal file
View file

@ -0,0 +1,45 @@
package client
import (
"time"
digest "github.com/opencontainers/go-digest"
)
type Vertex struct {
Digest digest.Digest
Inputs []digest.Digest
Name string
Started *time.Time
Completed *time.Time
Cached bool
Error string
}
type VertexStatus struct {
ID string
Vertex digest.Digest
Name string
Total int64
Current int64
Timestamp time.Time
Started *time.Time
Completed *time.Time
}
type VertexLog struct {
Vertex digest.Digest
Stream int
Data []byte
Timestamp time.Time
}
type SolveStatus struct {
Vertexes []*Vertex
Statuses []*VertexStatus
Logs []*VertexLog
}
type SolveResponse struct {
ExporterResponse map[string]string
}

vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored Normal file
View file

@ -0,0 +1,387 @@
package llb
import (
_ "crypto/sha256"
"sort"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type Meta struct {
Args []string
Env EnvList
Cwd string
User string
ProxyEnv *ProxyEnv
}
func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
e := &ExecOp{meta: meta, cachedOpMetadata: md}
rootMount := &mount{
target: pb.RootMount,
source: root,
readonly: readOnly,
}
e.mounts = append(e.mounts, rootMount)
if readOnly {
e.root = root
} else {
e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
}
rootMount.output = e.root
return e
}
type mount struct {
target string
readonly bool
source Output
output Output
selector string
cacheID string
tmpfs bool
// hasOutput bool
}
type ExecOp struct {
root Output
mounts []*mount
meta Meta
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
isValidated bool
}
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
m := &mount{
target: target,
source: source,
}
for _, o := range opt {
o(m)
}
e.mounts = append(e.mounts, m)
if m.readonly {
m.output = source
} else if m.tmpfs {
m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
} else {
m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)}
}
e.cachedPB = nil
e.isValidated = false
return m.output
}
func (e *ExecOp) GetMount(target string) Output {
for _, m := range e.mounts {
if m.target == target {
return m.output
}
}
return nil
}
func (e *ExecOp) Validate() error {
if e.isValidated {
return nil
}
if len(e.meta.Args) == 0 {
return errors.Errorf("arguments are required")
}
if e.meta.Cwd == "" {
return errors.Errorf("working directory is required")
}
for _, m := range e.mounts {
if m.source != nil {
if err := m.source.Vertex().Validate(); err != nil {
return err
}
}
}
e.isValidated = true
return nil
}
func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if e.cachedPB != nil {
return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil
}
if err := e.Validate(); err != nil {
return "", nil, nil, err
}
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
peo := &pb.ExecOp{
Meta: &pb.Meta{
Args: e.meta.Args,
Env: e.meta.Env.ToArray(),
Cwd: e.meta.Cwd,
User: e.meta.User,
},
}
if p := e.meta.ProxyEnv; p != nil {
peo.Meta.ProxyEnv = &pb.ProxyEnv{
HttpProxy: p.HttpProxy,
HttpsProxy: p.HttpsProxy,
FtpProxy: p.FtpProxy,
NoProxy: p.NoProxy,
}
}
pop := &pb.Op{
Op: &pb.Op_Exec{
Exec: peo,
},
}
outIndex := 0
for _, m := range e.mounts {
inputIndex := pb.InputIndex(len(pop.Inputs))
if m.source != nil {
if m.tmpfs {
return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
}
inp, err := m.source.ToInput()
if err != nil {
return "", nil, nil, err
}
newInput := true
for i, inp2 := range pop.Inputs {
if *inp == *inp2 {
inputIndex = pb.InputIndex(i)
newInput = false
break
}
}
if newInput {
pop.Inputs = append(pop.Inputs, inp)
}
} else {
inputIndex = pb.Empty
}
outputIndex := pb.OutputIndex(-1)
if !m.readonly && m.cacheID == "" && !m.tmpfs {
outputIndex = pb.OutputIndex(outIndex)
outIndex++
}
pm := &pb.Mount{
Input: inputIndex,
Dest: m.target,
Readonly: m.readonly,
Output: outputIndex,
Selector: m.selector,
}
if m.cacheID != "" {
pm.MountType = pb.MountType_CACHE
pm.CacheOpt = &pb.CacheOpt{
ID: m.cacheID,
}
}
if m.tmpfs {
pm.MountType = pb.MountType_TMPFS
}
peo.Mounts = append(peo.Mounts, pm)
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
}
e.cachedPBDigest = digest.FromBytes(dt)
e.cachedPB = dt
return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil
}
func (e *ExecOp) Output() Output {
return e.root
}
func (e *ExecOp) Inputs() (inputs []Output) {
mm := map[Output]struct{}{}
for _, m := range e.mounts {
if m.source != nil {
mm[m.source] = struct{}{}
}
}
for o := range mm {
inputs = append(inputs, o)
}
return
}
func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
return func() (pb.OutputIndex, error) {
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
i := 0
for _, m2 := range e.mounts {
if m2.readonly || m2.cacheID != "" || m2.tmpfs {
continue
}
if m == m2 {
return pb.OutputIndex(i), nil
}
i++
}
return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target)
}
}
type ExecState struct {
State
exec *ExecOp
}
func (e ExecState) AddMount(target string, source State, opt ...MountOption) State {
return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...))
}
func (e ExecState) GetMount(target string) State {
return NewState(e.exec.GetMount(target))
}
func (e ExecState) Root() State {
return e.State
}
type MountOption func(*mount)
func Readonly(m *mount) {
m.readonly = true
}
func SourcePath(src string) MountOption {
return func(m *mount) {
m.selector = src
}
}
func AsPersistentCacheDir(id string) MountOption {
return func(m *mount) {
m.cacheID = id
}
}
func Tmpfs() MountOption {
return func(m *mount) {
m.tmpfs = true
}
}
type RunOption interface {
SetRunOption(es *ExecInfo)
}
type runOptionFunc func(*ExecInfo)
func (fn runOptionFunc) SetRunOption(ei *ExecInfo) {
fn(ei)
}
func Shlex(str string) RunOption {
return Shlexf(str)
}
func Shlexf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = shlexf(str, v...)(ei.State)
})
}
func Args(a []string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = args(a...)(ei.State)
})
}
func AddEnv(key, value string) RunOption {
return AddEnvf(key, value)
}
func AddEnvf(key, value string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.AddEnvf(key, value, v...)
})
}
func User(str string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.User(str)
})
}
func Dir(str string) RunOption {
return Dirf(str)
}
func Dirf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Dirf(str, v...)
})
}
func Reset(s State) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Reset(s)
})
}
func With(so ...StateOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.With(so...)
})
}
func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts})
})
}
func ReadonlyRootFS() RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ReadonlyRootFS = true
})
}
func WithProxy(ps ProxyEnv) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ProxyEnv = &ps
})
}
type ExecInfo struct {
opMetaWrapper
State State
Mounts []MountInfo
ReadonlyRootFS bool
ProxyEnv *ProxyEnv
}
type MountInfo struct {
Target string
Source Output
Opts []MountOption
}
type ProxyEnv struct {
HttpProxy string
HttpsProxy string
FtpProxy string
NoProxy string
}
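// Editor's sketch (assumed usage, not in the original file): RunOptions and
// MountOptions compose onto a State. Here a hypothetical local source is
// mounted read-only and a named cache directory persists across builds; the
// image reference and paths are illustrative.
//
// src := Local("src")
// st := Image("docker.io/library/golang:1.10").
// Run(Shlex("go build ./..."),
// Dir("/src"),
// AddMount("/src", src, Readonly),
// AddMount("/root/.cache", Scratch(), AsPersistentCacheDir("go-build"))).
// Root()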

View file

@ -0,0 +1,87 @@
package imagemetaresolver
import (
"context"
"net/http"
"sync"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/imageutil"
digest "github.com/opencontainers/go-digest"
)
var defaultImageMetaResolver llb.ImageMetaResolver
var defaultImageMetaResolverOnce sync.Once
// WithDefault is an ImageOption that resolves image metadata with the shared default resolver.
var WithDefault = llb.ImageOptionFunc(func(ii *llb.ImageInfo) {
llb.WithMetaResolver(Default()).SetImageOption(ii)
})
type imageMetaResolverOpts struct {
platform string
}
type ImageMetaResolverOpt func(o *imageMetaResolverOpts)
func WithPlatform(p string) ImageMetaResolverOpt {
return func(o *imageMetaResolverOpts) {
o.platform = p
}
}
func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver {
var opts imageMetaResolverOpts
for _, f := range with {
f(&opts)
}
return &imageMetaResolver{
resolver: docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
}),
platform: opts.platform,
buffer: contentutil.NewBuffer(),
cache: map[string]resolveResult{},
locker: locker.New(),
}
}
func Default() llb.ImageMetaResolver {
defaultImageMetaResolverOnce.Do(func() {
defaultImageMetaResolver = New()
})
return defaultImageMetaResolver
}
type imageMetaResolver struct {
resolver remotes.Resolver
buffer contentutil.Buffer
platform string
locker *locker.Locker
cache map[string]resolveResult
}
type resolveResult struct {
config []byte
dgst digest.Digest
}
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
imr.locker.Lock(ref)
defer imr.locker.Unlock(ref)
if res, ok := imr.cache[ref]; ok {
return res.dgst, res.config, nil
}
dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, imr.platform)
if err != nil {
return "", nil, err
}
imr.cache[ref] = resolveResult{dgst: dgst, config: config}
return dgst, config, nil
}
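// Editor's sketch (assumed usage): attaching the shared default resolver lets
// llb.Image pick up Env/WorkingDir from the remote image config while the
// build definition is being constructed.
//
// st := llb.Image("docker.io/library/alpine:latest", imagemetaresolver.WithDefault)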

vendor/github.com/moby/buildkit/client/llb/marshal.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
package llb
import (
"io"
"io/ioutil"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
// Definition is the LLB definition structure with per-vertex metadata entries
// Corresponds to the Definition structure defined in solver/pb.Definition.
type Definition struct {
Def [][]byte
Metadata map[digest.Digest]OpMetadata
}
func (def *Definition) ToPB() *pb.Definition {
md := make(map[digest.Digest]OpMetadata)
for k, v := range def.Metadata {
md[k] = v
}
return &pb.Definition{
Def: def.Def,
Metadata: md,
}
}
func (def *Definition) FromPB(x *pb.Definition) {
def.Def = x.Def
def.Metadata = make(map[digest.Digest]OpMetadata)
for k, v := range x.Metadata {
def.Metadata[k] = v
}
}
type OpMetadata = pb.OpMetadata
func WriteTo(def *Definition, w io.Writer) error {
b, err := def.ToPB().Marshal()
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
func ReadFrom(r io.Reader) (*Definition, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var pbDef pb.Definition
if err := pbDef.Unmarshal(b); err != nil {
return nil, err
}
var def Definition
def.FromPB(&pbDef)
return &def, nil
}
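// Editor's sketch (not part of the original file): WriteTo and ReadFrom give
// a simple round-trip for shipping a marshaled definition between processes;
// bytes.Buffer here is just an illustrative io.ReadWriter.
//
// var buf bytes.Buffer
// if err := WriteTo(def, &buf); err != nil {
// return err
// }
// def2, err := ReadFrom(&buf)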

vendor/github.com/moby/buildkit/client/llb/meta.go generated vendored Normal file
View file

@ -0,0 +1,152 @@
package llb
import (
"fmt"
"path"
"github.com/google/shlex"
)
type contextKeyT string
var (
keyArgs = contextKeyT("llb.exec.args")
keyDir = contextKeyT("llb.exec.dir")
keyEnv = contextKeyT("llb.exec.env")
keyUser = contextKeyT("llb.exec.user")
)
func addEnv(key, value string) StateOption {
return addEnvf(key, value)
}
func addEnvf(key, value string, v ...interface{}) StateOption {
return func(s State) State {
return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
}
}
func dir(str string) StateOption {
return dirf(str)
}
func dirf(str string, v ...interface{}) StateOption {
return func(s State) State {
value := fmt.Sprintf(str, v...)
if !path.IsAbs(value) {
prev := getDir(s)
if prev == "" {
prev = "/"
}
value = path.Join(prev, value)
}
return s.WithValue(keyDir, value)
}
}
func user(str string) StateOption {
return func(s State) State {
return s.WithValue(keyUser, str)
}
}
func reset(s_ State) StateOption {
return func(s State) State {
s = NewState(s.Output())
s.ctx = s_.ctx
return s
}
}
func getEnv(s State) EnvList {
v := s.Value(keyEnv)
if v != nil {
return v.(EnvList)
}
return EnvList{}
}
func getDir(s State) string {
v := s.Value(keyDir)
if v != nil {
return v.(string)
}
return ""
}
func getArgs(s State) []string {
v := s.Value(keyArgs)
if v != nil {
return v.([]string)
}
return nil
}
func getUser(s State) string {
v := s.Value(keyUser)
if v != nil {
return v.(string)
}
return ""
}
func args(args ...string) StateOption {
return func(s State) State {
return s.WithValue(keyArgs, args)
}
}
func shlexf(str string, v ...interface{}) StateOption {
return func(s State) State {
arg, err := shlex.Split(fmt.Sprintf(str, v...))
if err != nil {
// TODO: handle error
}
return args(arg...)(s)
}
}
type EnvList []KeyValue
type KeyValue struct {
key string
value string
}
func (e EnvList) AddOrReplace(k, v string) EnvList {
e = e.Delete(k)
e = append(e, KeyValue{key: k, value: v})
return e
}
func (e EnvList) Delete(k string) EnvList {
e = append([]KeyValue(nil), e...)
if i, ok := e.Index(k); ok {
return append(e[:i], e[i+1:]...)
}
return e
}
func (e EnvList) Get(k string) (string, bool) {
if index, ok := e.Index(k); ok {
return e[index].value, true
}
return "", false
}
func (e EnvList) Index(k string) (int, bool) {
for i, kv := range e {
if kv.key == k {
return i, true
}
}
return -1, false
}
func (e EnvList) ToArray() []string {
out := make([]string, 0, len(e))
for _, kv := range e {
out = append(out, kv.key+"="+kv.value)
}
return out
}

vendor/github.com/moby/buildkit/client/llb/resolver.go generated vendored Normal file
View file

@ -0,0 +1,17 @@
package llb
import (
"context"
digest "github.com/opencontainers/go-digest"
)
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
return ImageOptionFunc(func(ii *ImageInfo) {
ii.metaResolver = mr
})
}
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
}

vendor/github.com/moby/buildkit/client/llb/source.go generated vendored Normal file
View file

@ -0,0 +1,359 @@
package llb
import (
"context"
_ "crypto/sha256"
"encoding/json"
"os"
"strconv"
"strings"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type SourceOp struct {
id string
attrs map[string]string
output Output
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
err error
}
func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp {
s := &SourceOp{
id: id,
attrs: attrs,
cachedOpMetadata: md,
}
s.output = &output{vertex: s}
return s
}
func (s *SourceOp) Validate() error {
if s.err != nil {
return s.err
}
if s.id == "" {
return errors.Errorf("source identifier can't be empty")
}
return nil
}
func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if s.cachedPB != nil {
return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil
}
if err := s.Validate(); err != nil {
return "", nil, nil, err
}
proto := &pb.Op{
Op: &pb.Op_Source{
Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
},
}
dt, err := proto.Marshal()
if err != nil {
return "", nil, nil, err
}
s.cachedPB = dt
s.cachedPBDigest = digest.FromBytes(dt)
return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil
}
func (s *SourceOp) Output() Output {
return s.output
}
func (s *SourceOp) Inputs() []Output {
return nil
}
func Source(id string) State {
return NewState(NewSource(id, nil, OpMetadata{}).Output())
}
func Image(ref string, opts ...ImageOption) State {
r, err := reference.ParseNormalizedNamed(ref)
if err == nil {
ref = reference.TagNameOnly(r).String()
}
var info ImageInfo
for _, opt := range opts {
opt.SetImageOption(&info)
}
src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial
if err != nil {
src.err = err
}
if info.metaResolver != nil {
_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref)
if err != nil {
src.err = err
} else {
var img struct {
Config struct {
Env []string `json:"Env,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
User string `json:"User,omitempty"`
} `json:"config,omitempty"`
}
if err := json.Unmarshal(dt, &img); err != nil {
src.err = err
} else {
st := NewState(src.Output())
for _, env := range img.Config.Env {
parts := strings.SplitN(env, "=", 2)
if len(parts[0]) > 0 {
var v string
if len(parts) > 1 {
v = parts[1]
}
st = st.AddEnv(parts[0], v)
}
}
st = st.Dir(img.Config.WorkingDir)
return st
}
}
}
return NewState(src.Output())
}
type ImageOption interface {
SetImageOption(*ImageInfo)
}
type ImageOptionFunc func(*ImageInfo)
func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
fn(ii)
}
type ImageInfo struct {
opMetaWrapper
metaResolver ImageMetaResolver
}
func Git(remote, ref string, opts ...GitOption) State {
url := ""
for _, prefix := range []string{
"http://", "https://", "git://", "git@",
} {
if strings.HasPrefix(remote, prefix) {
url = strings.Split(remote, "#")[0]
remote = strings.TrimPrefix(remote, prefix)
}
}
id := remote
if ref != "" {
id += "#" + ref
}
gi := &GitInfo{}
for _, o := range opts {
o.SetGitOption(gi)
}
attrs := map[string]string{}
if gi.KeepGitDir {
attrs[pb.AttrKeepGitDir] = "true"
}
if url != "" {
attrs[pb.AttrFullRemoteURL] = url
}
source := NewSource("git://"+id, attrs, gi.Metadata())
return NewState(source.Output())
}
type GitOption interface {
SetGitOption(*GitInfo)
}
type gitOptionFunc func(*GitInfo)
func (fn gitOptionFunc) SetGitOption(gi *GitInfo) {
fn(gi)
}
type GitInfo struct {
opMetaWrapper
KeepGitDir bool
}
func KeepGitDir() GitOption {
return gitOptionFunc(func(gi *GitInfo) {
gi.KeepGitDir = true
})
}
func Scratch() State {
return NewState(nil)
}
func Local(name string, opts ...LocalOption) State {
gi := &LocalInfo{}
for _, o := range opts {
o.SetLocalOption(gi)
}
attrs := map[string]string{}
if gi.SessionID != "" {
attrs[pb.AttrLocalSessionID] = gi.SessionID
}
if gi.IncludePatterns != "" {
attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
}
if gi.FollowPaths != "" {
attrs[pb.AttrFollowPaths] = gi.FollowPaths
}
if gi.ExcludePatterns != "" {
attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
}
if gi.SharedKeyHint != "" {
attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
}
source := NewSource("local://"+name, attrs, gi.Metadata())
return NewState(source.Output())
}
type LocalOption interface {
SetLocalOption(*LocalInfo)
}
type localOptionFunc func(*LocalInfo)
func (fn localOptionFunc) SetLocalOption(li *LocalInfo) {
fn(li)
}
func SessionID(id string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SessionID = id
})
}
func IncludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.IncludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.IncludePatterns = string(dt)
})
}
func FollowPaths(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.FollowPaths = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.FollowPaths = string(dt)
})
}
func ExcludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.ExcludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.ExcludePatterns = string(dt)
})
}
func SharedKeyHint(h string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SharedKeyHint = h
})
}
type LocalInfo struct {
opMetaWrapper
SessionID string
IncludePatterns string
ExcludePatterns string
FollowPaths string
SharedKeyHint string
}
func HTTP(url string, opts ...HTTPOption) State {
hi := &HTTPInfo{}
for _, o := range opts {
o.SetHTTPOption(hi)
}
attrs := map[string]string{}
if hi.Checksum != "" {
attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
}
if hi.Filename != "" {
attrs[pb.AttrHTTPFilename] = hi.Filename
}
if hi.Perm != 0 {
attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
}
if hi.UID != 0 {
attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
}
if hi.GID != 0 {
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
}
source := NewSource(url, attrs, hi.Metadata())
return NewState(source.Output())
}
type HTTPInfo struct {
opMetaWrapper
Checksum digest.Digest
Filename string
Perm int
UID int
GID int
}
type HTTPOption interface {
SetHTTPOption(*HTTPInfo)
}
type httpOptionFunc func(*HTTPInfo)
func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) {
fn(hi)
}
func Checksum(dgst digest.Digest) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Checksum = dgst
})
}
func Chmod(perm os.FileMode) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Perm = int(perm) & 0777
})
}
func Filename(name string) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Filename = name
})
}
func Chown(uid, gid int) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.UID = uid
hi.GID = gid
})
}
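// Editor's sketch (assumed values): the constructors above map onto
// "docker-image://", "git://", "local://" and plain URL source identifiers.
//
// img := Image("docker.io/library/busybox:latest")
// repo := Git("github.com/moby/buildkit", "master", KeepGitDir())
// srcDir := Local("context", ExcludePatterns([]string{"node_modules"}))
// file := HTTP("https://example.com/build.tar", Filename("build.tar"), Chmod(0600))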

vendor/github.com/moby/buildkit/client/llb/state.go generated vendored Normal file
View file

@ -0,0 +1,316 @@
package llb
import (
"context"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
)
type StateOption func(State) State
type Output interface {
ToInput() (*pb.Input, error)
Vertex() Vertex
}
type Vertex interface {
Validate() error
Marshal() (digest.Digest, []byte, *OpMetadata, error)
Output() Output
Inputs() []Output
}
func NewState(o Output) State {
s := State{
out: o,
ctx: context.Background(),
}
s = dir("/")(s)
s = addEnv("PATH", system.DefaultPathEnv)(s)
return s
}
type State struct {
out Output
ctx context.Context
}
func (s State) WithValue(k, v interface{}) State {
return State{
out: s.out,
ctx: context.WithValue(s.ctx, k, v),
}
}
func (s State) Value(k interface{}) interface{} {
return s.ctx.Value(k)
}
func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
def := &Definition{
Metadata: make(map[digest.Digest]OpMetadata, 0),
}
if s.Output() == nil {
return def, nil
}
def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md)
if err != nil {
return def, err
}
inp, err := s.Output().ToInput()
if err != nil {
return def, err
}
proto := &pb.Op{Inputs: []*pb.Input{inp}}
dt, err := proto.Marshal()
if err != nil {
return def, err
}
def.Def = append(def.Def, dt)
return def, nil
}
func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) {
if _, ok := vertexCache[v]; ok {
return def, nil
}
for _, inp := range v.Inputs() {
var err error
def, err = marshal(inp.Vertex(), def, cache, vertexCache, md)
if err != nil {
return def, err
}
}
dgst, dt, opMeta, err := v.Marshal()
if err != nil {
return def, err
}
vertexCache[v] = struct{}{}
if opMeta != nil {
m := mergeMetadata(def.Metadata[dgst], *opMeta)
for _, f := range md {
f.SetMetadataOption(&m)
}
def.Metadata[dgst] = m
}
if _, ok := cache[dgst]; ok {
return def, nil
}
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil
}
func (s State) Validate() error {
return s.Output().Vertex().Validate()
}
func (s State) Output() Output {
return s.out
}
func (s State) WithOutput(o Output) State {
return State{
out: o,
ctx: s.ctx,
}
}
func (s State) Run(ro ...RunOption) ExecState {
ei := &ExecInfo{State: s}
for _, o := range ro {
o.SetRunOption(ei)
}
meta := Meta{
Args: getArgs(ei.State),
Cwd: getDir(ei.State),
Env: getEnv(ei.State),
User: getUser(ei.State),
ProxyEnv: ei.ProxyEnv,
}
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata())
for _, m := range ei.Mounts {
exec.AddMount(m.Target, m.Source, m.Opts...)
}
return ExecState{
State: s.WithOutput(exec.Output()),
exec: exec,
}
}
func (s State) AddEnv(key, value string) State {
return s.AddEnvf(key, value)
}
func (s State) AddEnvf(key, value string, v ...interface{}) State {
return addEnvf(key, value, v...)(s)
}
func (s State) Dir(str string) State {
return s.Dirf(str)
}
func (s State) Dirf(str string, v ...interface{}) State {
return dirf(str, v...)(s)
}
func (s State) GetEnv(key string) (string, bool) {
return getEnv(s).Get(key)
}
func (s State) GetDir() string {
return getDir(s)
}
func (s State) GetArgs() []string {
return getArgs(s)
}
func (s State) Reset(s2 State) State {
return reset(s2)(s)
}
func (s State) User(v string) State {
return user(v)(s)
}
func (s State) With(so ...StateOption) State {
for _, o := range so {
s = o(s)
}
return s
}
type output struct {
vertex Vertex
getIndex func() (pb.OutputIndex, error)
err error
}
func (o *output) ToInput() (*pb.Input, error) {
if o.err != nil {
return nil, o.err
}
var index pb.OutputIndex
if o.getIndex != nil {
var err error
index, err = o.getIndex()
if err != nil {
return nil, err
}
}
dgst, _, _, err := o.vertex.Marshal()
if err != nil {
return nil, err
}
return &pb.Input{Digest: dgst, Index: index}, nil
}
func (o *output) Vertex() Vertex {
return o.vertex
}
type MetadataOpt interface {
SetMetadataOption(*OpMetadata)
RunOption
LocalOption
HTTPOption
ImageOption
GitOption
}
type metadataOptFunc func(m *OpMetadata)
func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) {
fn(m)
}
func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) {
ei.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) {
li.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) {
hi.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) {
ii.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetGitOption(gi *GitInfo) {
gi.ApplyMetadata(fn)
}
func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
if m2.IgnoreCache {
m1.IgnoreCache = true
}
if len(m2.Description) > 0 {
if m1.Description == nil {
m1.Description = make(map[string]string)
}
for k, v := range m2.Description {
m1.Description[k] = v
}
}
if m2.ExportCache != nil {
m1.ExportCache = m2.ExportCache
}
return m1
}
var IgnoreCache = metadataOptFunc(func(md *OpMetadata) {
md.IgnoreCache = true
})
func WithDescription(m map[string]string) MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.Description = m
})
}
// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.ExportCache = &pb.ExportCache{Value: true}
})
}
// WithoutExportCache sets results for this vertex to be not exported with
// the cache
func WithoutExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// ExportCache with value false means to disable exporting
md.ExportCache = &pb.ExportCache{Value: false}
})
}
// WithoutDefaultExportCache resets the cache export for the vertex to use
// the default defined by the build configuration.
func WithoutDefaultExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// nil means no vertex based config has been set
md.ExportCache = nil
})
}
type opMetaWrapper struct {
OpMetadata
}
func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) {
f(&mw.OpMetadata)
}
func (mw *opMetaWrapper) Metadata() OpMetadata {
return mw.OpMetadata
}
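// Editor's sketch (not part of the original file): State values are
// immutable, so each call below returns a new State; Marshal produces the
// Definition that is sent to the solver. The image reference is illustrative.
//
// st := Image("docker.io/library/alpine:latest").
// AddEnv("FOO", "bar").
// Dir("/work").
// Run(Shlex("echo hello")).Root()
// def, err := st.Marshal()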

vendor/github.com/moby/buildkit/client/prune.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
package client
import (
"context"
"io"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error {
info := &PruneInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.PruneRequest{}
cl, err := c.controlClient().Prune(ctx, req)
if err != nil {
return errors.Wrap(err, "failed to call prune")
}
for {
d, err := cl.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if ch != nil {
ch <- UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
}
}
}
}
type PruneOption func(*PruneInfo)
type PruneInfo struct {
}
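// Editor's sketch (assumed usage; c and ctx are assumed in scope): Prune
// sends each record on ch but does not close it, so the caller drives the
// channel lifecycle, e.g.:
//
// ch := make(chan UsageInfo)
// go func() {
// defer close(ch)
// if err := c.Prune(ctx, ch); err != nil {
// // handle error
// }
// }()
// for ui := range ch {
// fmt.Printf("pruned %s (%d bytes)\n", ui.ID, ui.Size)
// }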

vendor/github.com/moby/buildkit/client/solve.go generated vendored Normal file
View file

@ -0,0 +1,251 @@
package client
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver/pb"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type SolveOpt struct {
Exporter string
ExporterAttrs map[string]string
ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker
ExporterOutputDir string // for ExporterLocal
LocalDirs map[string]string
SharedKey string
Frontend string
FrontendAttrs map[string]string
ExportCache string
ExportCacheAttrs map[string]string
ImportCache []string
Session []session.Attachable
}
// Solve calls Solve on the controller.
// def must be nil if (and only if) opt.Frontend is set.
func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
defer func() {
if statusChan != nil {
close(statusChan)
}
}()
if opt.Frontend == "" && def == nil {
return nil, errors.New("invalid empty definition")
}
if opt.Frontend != "" && def != nil {
return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend)
}
syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs)
if err != nil {
return nil, err
}
ref := identity.NewID()
eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background())
defer cancelStatus()
if span := opentracing.SpanFromContext(ctx); span != nil {
statusContext = opentracing.ContextWithSpan(statusContext, span)
}
s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
}
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
}
for _, a := range opt.Session {
s.Allow(a)
}
switch opt.Exporter {
case ExporterLocal:
if opt.ExporterOutput != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if opt.ExporterOutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir))
case ExporterOCI, ExporterDocker:
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
if opt.ExporterOutput == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter)
}
s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput))
default:
if opt.ExporterOutput != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter)
}
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
}
eg.Go(func() error {
return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
})
var res *SolveResponse
eg.Go(func() error {
defer func() { // make sure the Status ends cleanly on build errors
go func() {
<-time.After(3 * time.Second)
cancelStatus()
}()
logrus.Debugf("stopping session")
s.Close()
}()
var pbd *pb.Definition
if def != nil {
pbd = def.ToPB()
}
resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: opt.Exporter,
ExporterAttrs: opt.ExporterAttrs,
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Cache: controlapi.CacheOptions{
ExportRef: opt.ExportCache,
ImportRefs: opt.ImportCache,
ExportAttrs: opt.ExportCacheAttrs,
},
})
if err != nil {
return errors.Wrap(err, "failed to solve")
}
res = &SolveResponse{
ExporterResponse: resp.ExporterResponse,
}
return nil
})
eg.Go(func() error {
stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
Ref: ref,
})
if err != nil {
return errors.Wrap(err, "failed to get status")
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
s := SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
if statusChan != nil {
statusChan <- &s
}
}
})
if err := eg.Wait(); err != nil {
return nil, err
}
return res, nil
}
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
return nil, errors.Wrapf(err, "could not find %s", d)
}
if !fi.IsDir() {
return nil, errors.Errorf("%s not a directory", d)
}
}
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d})
}
} else {
for _, dt := range def.Def {
var op pb.Op
if err := (&op).Unmarshal(dt); err != nil {
return nil, errors.Wrap(err, "failed to parse llb proto op")
}
if src := op.GetSource(); src != nil {
if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property
name := strings.TrimPrefix(src.Identifier, "local://")
d, ok := localDirs[name]
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes
}
}
}
}
return dirs, nil
}
func defaultSessionName() string {
wd, err := os.Getwd()
if err != nil {
return "unknown"
}
return filepath.Base(wd)
}
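// Editor's sketch (illustrative, not part of the vendored file; c, ctx and
// def are assumed in scope): a Solve call with a marshaled LLB definition and
// a drained status channel. The exporter settings are assumptions; Solve
// closes statusCh when it returns, so the goroutine exits cleanly.
//
// statusCh := make(chan *SolveStatus)
// go func() {
// for s := range statusCh {
// _ = s // forward to a progress display
// }
// }()
// res, err := c.Solve(ctx, def, SolveOpt{
// Exporter: ExporterLocal,
// ExporterOutputDir: "/tmp/out",
// LocalDirs: map[string]string{"context": "."},
// }, statusCh)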

vendor/github.com/moby/buildkit/client/workers.go generated vendored Normal file
View file

@ -0,0 +1,49 @@
package client
import (
"context"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type WorkerInfo struct {
ID string
Labels map[string]string
}
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
info := &ListWorkersInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
resp, err := c.controlClient().ListWorkers(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to list workers")
}
var wi []*WorkerInfo
for _, w := range resp.Record {
wi = append(wi, &WorkerInfo{
ID: w.ID,
Labels: w.Labels,
})
}
return wi, nil
}
type ListWorkersOption func(*ListWorkersInfo)
type ListWorkersInfo struct {
Filter []string
}
func WithWorkerFilter(f []string) ListWorkersOption {
return func(wi *ListWorkersInfo) {
wi.Filter = f
}
}
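
A short usage sketch for the workers API above. The daemon address and the filter expression are assumptions; WithWorkerFilter and the WorkerInfo fields come straight from the code.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/moby/buildkit/client"
)

func main() {
    c, err := client.New("unix:///run/buildkit/buildkitd.sock") // address is an assumption
    if err != nil {
        log.Fatal(err)
    }
    // filter strings are passed through verbatim to the daemon
    workers, err := c.ListWorkers(context.Background(), client.WithWorkerFilter([]string{"labels.os=linux"}))
    if err != nil {
        log.Fatal(err)
    }
    for _, w := range workers {
        fmt.Println(w.ID, w.Labels)
    }
}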

vendor/github.com/moby/buildkit/control/control.go generated vendored Normal file

@ -0,0 +1,292 @@
package control
import (
"context"
"github.com/docker/distribution/reference"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
type Opt struct {
SessionManager *session.Manager
WorkerController *worker.Controller
Frontends map[string]frontend.Frontend
CacheKeyStorage solver.CacheKeyStorage
CacheExporter *remotecache.CacheExporter
CacheImporter *remotecache.CacheImporter
}
type Controller struct { // TODO: ControlService
opt Opt
solver *llbsolver.Solver
}
func NewController(opt Opt) (*Controller, error) {
solver := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.CacheImporter)
c := &Controller{
opt: opt,
solver: solver,
}
return c, nil
}
func (c *Controller) Register(server *grpc.Server) error {
controlapi.RegisterControlServer(server, c)
return nil
}
func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) {
resp := &controlapi.DiskUsageResponse{}
workers, err := c.opt.WorkerController.List()
if err != nil {
return nil, err
}
for _, w := range workers {
du, err := w.DiskUsage(ctx, client.DiskUsageInfo{
Filter: r.Filter,
})
if err != nil {
return nil, err
}
for _, r := range du {
resp.Record = append(resp.Record, &controlapi.UsageRecord{
// TODO: add worker info
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size_: r.Size,
Parent: r.Parent,
UsageCount: int64(r.UsageCount),
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
})
}
}
return resp, nil
}
func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error {
ch := make(chan client.UsageInfo)
eg, ctx := errgroup.WithContext(stream.Context())
workers, err := c.opt.WorkerController.List()
if err != nil {
return errors.Wrap(err, "failed to list workers for prune")
}
for _, w := range workers {
func(w worker.Worker) {
eg.Go(func() error {
return w.Prune(ctx, ch)
})
}(w)
}
eg2, ctx := errgroup.WithContext(stream.Context())
eg2.Go(func() error {
defer close(ch)
return eg.Wait()
})
eg2.Go(func() error {
for r := range ch {
if err := stream.Send(&controlapi.UsageRecord{
// TODO: add worker info
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size_: r.Size,
Parent: r.Parent,
UsageCount: int64(r.UsageCount),
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
}); err != nil {
return err
}
}
return nil
})
return eg2.Wait()
}
func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
ctx = session.NewContext(ctx, req.Session)
var expi exporter.ExporterInstance
// TODO: multiworker
// This is tricky: the exporter should come from the worker that owns the returned reference, so we may need to delay resolving it until the solver has loaded that reference.
w, err := c.opt.WorkerController.GetDefault()
if err != nil {
return nil, err
}
if req.Exporter != "" {
exp, err := w.Exporter(req.Exporter)
if err != nil {
return nil, err
}
expi, err = exp.Resolve(ctx, req.ExporterAttrs)
if err != nil {
return nil, err
}
}
var cacheExporter *remotecache.RegistryCacheExporter
if ref := req.Cache.ExportRef; ref != "" {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
exportCacheRef := reference.TagNameOnly(parsed).String()
cacheExporter = c.opt.CacheExporter.ExporterForTarget(exportCacheRef)
}
var importCacheRefs []string
for _, ref := range req.Cache.ImportRefs {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
importCacheRefs = append(importCacheRefs, reference.TagNameOnly(parsed).String())
}
resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{
Frontend: req.Frontend,
Definition: req.Definition,
FrontendOpt: req.FrontendAttrs,
ImportCacheRefs: importCacheRefs,
}, llbsolver.ExporterRequest{
Exporter: expi,
CacheExporter: cacheExporter,
CacheExportMode: parseCacheExporterOpt(req.Cache.ExportAttrs),
})
if err != nil {
return nil, err
}
return &controlapi.SolveResponse{
ExporterResponse: resp.ExporterResponse,
}, nil
}
func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error {
ch := make(chan *client.SolveStatus, 8)
eg, ctx := errgroup.WithContext(stream.Context())
eg.Go(func() error {
return c.solver.Status(ctx, req.Ref, ch)
})
eg.Go(func() error {
for {
ss, ok := <-ch
if !ok {
return nil
}
sr := controlapi.StatusResponse{}
for _, v := range ss.Vertexes {
sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range ss.Statuses {
sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Current: v.Current,
Total: v.Total,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range ss.Logs {
sr.Logs = append(sr.Logs, &controlapi.VertexLog{
Vertex: v.Vertex,
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: v.Timestamp,
})
}
if err := stream.SendMsg(&sr); err != nil {
return err
}
}
})
return eg.Wait()
}
func (c *Controller) Session(stream controlapi.Control_SessionServer) error {
logrus.Debugf("session started")
conn, closeCh, opts := grpchijack.Hijack(stream)
defer conn.Close()
ctx, cancel := context.WithCancel(stream.Context())
go func() {
<-closeCh
cancel()
}()
err := c.opt.SessionManager.HandleConn(ctx, conn, opts)
logrus.Debugf("session finished: %v", err)
return err
}
func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) {
resp := &controlapi.ListWorkersResponse{}
workers, err := c.opt.WorkerController.List(r.Filter...)
if err != nil {
return nil, err
}
for _, w := range workers {
resp.Record = append(resp.Record, &controlapi.WorkerRecord{
ID: w.ID(),
Labels: w.Labels(),
})
}
return resp, nil
}
func parseCacheExporterOpt(opt map[string]string) solver.CacheExportMode {
for k, v := range opt {
switch k {
case "mode":
switch v {
case "min":
return solver.CacheExportModeMin
case "max":
return solver.CacheExportModeMax
default:
logrus.Debugf("skipping incalid cache export mode: %s", v)
}
default:
logrus.Warnf("skipping invalid cache export opt: %s", v)
}
}
return solver.CacheExportModeMin
}
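
Because parseCacheExporterOpt falls back silently to the min mode, a small in-package test (a hypothetical _test.go file, not part of this diff) makes the mapping explicit:

package control

import (
    "testing"

    "github.com/moby/buildkit/solver"
)

func TestParseCacheExporterOpt(t *testing.T) {
    // "mode=max" selects the max export mode
    if got := parseCacheExporterOpt(map[string]string{"mode": "max"}); got != solver.CacheExportModeMax {
        t.Fatalf("expected max mode, got %v", got)
    }
    // unknown values are logged and fall back to the min-mode default
    if got := parseCacheExporterOpt(map[string]string{"mode": "bogus"}); got != solver.CacheExportModeMin {
        t.Fatalf("expected min fallback, got %v", got)
    }
}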

vendor/github.com/moby/buildkit/executor/executor.go generated vendored Normal file

@ -0,0 +1,30 @@
package executor
import (
"context"
"io"
"github.com/moby/buildkit/cache"
)
type Meta struct {
Args []string
Env []string
User string
Cwd string
Tty bool
ReadonlyRootFS bool
// DisableNetworking bool
}
type Mount struct {
Src cache.Mountable
Selector string
Dest string
Readonly bool
}
type Executor interface {
// TODO: add stdout/err
Exec(ctx context.Context, meta Meta, rootfs cache.Mountable, mounts []Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
}
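
The Executor interface is the seam between the solver and the container runtime. A minimal stand-in implementation, handy for tracing what a solve would run; the loggingExecutor type is hypothetical and not part of this diff:

package main

import (
    "context"
    "io"
    "log"

    "github.com/moby/buildkit/cache"
    "github.com/moby/buildkit/executor"
)

// loggingExecutor records what would be executed instead of running it.
type loggingExecutor struct{}

func (loggingExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
    log.Printf("exec %v in %s as %q (%d extra mounts)", meta.Args, meta.Cwd, meta.User, len(mounts))
    return nil
}

// compile-time check that the interface is satisfied
var _ executor.Executor = loggingExecutor{}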

vendor/github.com/moby/buildkit/executor/oci/hosts.go generated vendored Normal file

@ -0,0 +1,38 @@
package oci
import (
"context"
"io/ioutil"
"os"
"path/filepath"
)
const hostsContent = `
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
`
func GetHostsFile(ctx context.Context, stateDir string) (string, error) {
p := filepath.Join(stateDir, "hosts")
_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
_, err := os.Stat(p)
if err == nil {
return "", nil
}
if !os.IsNotExist(err) {
return "", err
}
if err := ioutil.WriteFile(p+".tmp", []byte(hostsContent), 0644); err != nil {
return "", err
}
if err := os.Rename(p+".tmp", p); err != nil {
return "", err
}
return "", nil
})
if err != nil {
return "", err
}
return p, nil
}

vendor/github.com/moby/buildkit/executor/oci/mounts.go generated vendored Normal file

@ -0,0 +1,68 @@
package oci
import (
"context"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// MountOpts sets oci spec specific info for mount points
type MountOpts func([]specs.Mount) []specs.Mount
// GetMounts returns the default mounts required by buildkit
// https://github.com/moby/buildkit/issues/429
func GetMounts(ctx context.Context, mountOpts ...MountOpts) []specs.Mount {
mounts := []specs.Mount{
{
Destination: "/proc",
Type: "proc",
Source: "proc",
},
{
Destination: "/dev",
Type: "tmpfs",
Source: "tmpfs",
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
},
{
Destination: "/dev/pts",
Type: "devpts",
Source: "devpts",
Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
},
{
Destination: "/dev/shm",
Type: "tmpfs",
Source: "shm",
Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
},
{
Destination: "/dev/mqueue",
Type: "mqueue",
Source: "mqueue",
Options: []string{"nosuid", "noexec", "nodev"},
},
{
Destination: "/sys",
Type: "sysfs",
Source: "sysfs",
Options: []string{"nosuid", "noexec", "nodev", "ro"},
},
}
for _, o := range mountOpts {
mounts = o(mounts)
}
return mounts
}
func withROBind(src, dest string) func(m []specs.Mount) []specs.Mount {
return func(m []specs.Mount) []specs.Mount {
m = append(m, specs.Mount{
Destination: dest,
Type: "bind",
Source: src,
Options: []string{"rbind", "ro"},
})
return m
}
}
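
A hypothetical in-package sketch (it uses the unexported withROBind, so it would live in package oci) showing how GetMounts composes with the read-only bind helper, mirroring the spec-generation code later in this diff:

package oci

import (
    "context"
    "fmt"
)

func ExampleGetMounts() {
    // append a read-only bind for a prepared resolv.conf to the defaults
    mounts := GetMounts(context.Background(), withROBind("/tmp/resolv.conf", "/etc/resolv.conf"))
    for _, m := range mounts {
        fmt.Println(m.Destination, m.Type)
    }
}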


@ -0,0 +1,81 @@
package oci
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/libnetwork/resolvconf"
"github.com/moby/buildkit/util/flightcontrol"
)
var g flightcontrol.Group
var notFirstRun bool
var lastNotEmpty bool
func GetResolvConf(ctx context.Context, stateDir string) (string, error) {
p := filepath.Join(stateDir, "resolv.conf")
_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
generate := !notFirstRun
notFirstRun = true
if !generate {
fi, err := os.Stat(p)
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
generate = true
}
if !generate {
fiMain, err := os.Stat("/etc/resolv.conf")
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
if lastNotEmpty {
generate = true
lastNotEmpty = false
}
} else {
if fi.ModTime().Before(fiMain.ModTime()) {
generate = true
}
}
}
}
if !generate {
return "", nil
}
var dt []byte
f, err := resolvconf.Get()
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
} else {
dt = f.Content
}
f, err = resolvconf.FilterResolvDNS(dt, true)
if err != nil {
return "", err
}
if err := ioutil.WriteFile(p+".tmp", f.Content, 0644); err != nil {
return "", err
}
if err := os.Rename(p+".tmp", p); err != nil {
return "", err
}
return "", nil
})
if err != nil {
return "", err
}
return p, nil
}
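
GetResolvConf above and GetHostsFile earlier both funnel work through the package-level flightcontrol.Group, so concurrent builds share a single generation per state directory. A minimal sketch of that pattern, with a hypothetical key and return value:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/moby/buildkit/util/flightcontrol"
)

var group flightcontrol.Group

// concurrent callers passing the same key share a single execution of fn
func generateOnce(ctx context.Context, key string) (string, error) {
    v, err := group.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
        fmt.Println("generating for", key) // runs once per in-flight key
        return "state-for-" + key, nil
    })
    if err != nil {
        return "", err
    }
    return v.(string), nil
}

func main() {
    s, err := generateOnce(context.Background(), "/var/lib/buildkit")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(s)
}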


@ -0,0 +1,163 @@
// +build !windows
package oci
import (
"context"
"path"
"sync"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/oci"
"github.com/mitchellh/hashstructure"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/snapshot"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// Ideally we wouldn't have to import all of containerd just for the default spec.
// GenerateSpec generates a spec using containerd functionality.
func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
c := &containers.Container{
ID: id,
}
_, ok := namespaces.Namespace(ctx)
if !ok {
ctx = namespaces.WithNamespace(ctx, "buildkit")
}
opts = append(opts,
oci.WithHostNamespace(specs.NetworkNamespace),
)
// Note that containerd.GenerateSpec is namespaced so as to make
// specs.Linux.CgroupsPath namespaced
s, err := oci.GenerateSpec(ctx, nil, c, opts...)
if err != nil {
return nil, nil, err
}
s.Process.Args = meta.Args
s.Process.Env = meta.Env
s.Process.Cwd = meta.Cwd
s.Mounts = GetMounts(ctx,
withROBind(resolvConf, "/etc/resolv.conf"),
withROBind(hostsFile, "/etc/hosts"),
)
// TODO: User
sm := &submounts{}
var releasers []func() error
releaseAll := func() {
sm.cleanup()
for _, f := range releasers {
f()
}
}
for _, m := range mounts {
if m.Src == nil {
return nil, nil, errors.Errorf("mount %s has no source", m.Dest)
}
mountable, err := m.Src.Mount(ctx, m.Readonly)
if err != nil {
releaseAll()
return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest)
}
mounts, err := mountable.Mount()
if err != nil {
releaseAll()
return nil, nil, errors.WithStack(err)
}
releasers = append(releasers, mountable.Release)
for _, mount := range mounts {
mount, err = sm.subMount(mount, m.Selector)
if err != nil {
releaseAll()
return nil, nil, err
}
s.Mounts = append(s.Mounts, specs.Mount{
Destination: m.Dest,
Type: mount.Type,
Source: mount.Source,
Options: mount.Options,
})
}
}
return s, releaseAll, nil
}
type mountRef struct {
mount mount.Mount
unmount func() error
}
type submounts struct {
m map[uint64]mountRef
}
func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) {
if path.Join("/", subPath) == "/" {
return m, nil
}
if s.m == nil {
s.m = map[uint64]mountRef{}
}
h, err := hashstructure.Hash(m, nil)
if err != nil {
return mount.Mount{}, err
}
if mr, ok := s.m[h]; ok {
return sub(mr.mount, subPath), nil
}
lm := snapshot.LocalMounterWithMounts([]mount.Mount{m})
mp, err := lm.Mount()
if err != nil {
return mount.Mount{}, err
}
opts := []string{"rbind"}
for _, opt := range m.Options {
if opt == "ro" {
opts = append(opts, opt)
}
}
s.m[h] = mountRef{
mount: mount.Mount{
Source: mp,
Type: "bind",
Options: opts,
},
unmount: lm.Unmount,
}
return sub(s.m[h].mount, subPath), nil
}
func (s *submounts) cleanup() {
var wg sync.WaitGroup
wg.Add(len(s.m))
for _, m := range s.m {
func(m mountRef) {
go func() {
m.unmount()
wg.Done()
}()
}(m)
}
wg.Wait()
}
func sub(m mount.Mount, subPath string) mount.Mount {
m.Source = path.Join(m.Source, subPath)
return m
}

vendor/github.com/moby/buildkit/executor/oci/user.go generated vendored Normal file

@ -0,0 +1,86 @@
package oci
import (
"context"
"os"
"strconv"
"strings"
"github.com/containerd/continuity/fs"
"github.com/opencontainers/runc/libcontainer/user"
)
func GetUser(ctx context.Context, root, username string) (uint32, uint32, error) {
// fast path from uid/gid
if uid, gid, err := ParseUser(username); err == nil {
return uid, gid, nil
}
passwdPath, err := user.GetPasswdPath()
if err != nil {
return 0, 0, err
}
groupPath, err := user.GetGroupPath()
if err != nil {
return 0, 0, err
}
passwdFile, err := openUserFile(root, passwdPath)
if err == nil {
defer passwdFile.Close()
}
groupFile, err := openUserFile(root, groupPath)
if err == nil {
defer groupFile.Close()
}
execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
if err != nil {
return 0, 0, err
}
return uint32(execUser.Uid), uint32(execUser.Gid), nil
}
func ParseUser(str string) (uid uint32, gid uint32, err error) {
if str == "" {
return 0, 0, nil
}
parts := strings.SplitN(str, ":", 2)
for i, v := range parts {
switch i {
case 0:
uid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
if len(parts) == 1 {
gid = uid
}
case 1:
gid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
}
}
return
}
func openUserFile(root, p string) (*os.File, error) {
p, err := fs.RootPath(root, p)
if err != nil {
return nil, err
}
return os.Open(p)
}
func parseUID(str string) (uint32, error) {
if str == "root" {
return 0, nil
}
uid, err := strconv.ParseUint(str, 10, 32)
if err != nil {
return 0, err
}
return uint32(uid), nil
}
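
ParseUser accepts "uid", "uid:gid", or the literal "root" that parseUID special-cases; a bare uid doubles as the gid. A short usage sketch:

package main

import (
    "fmt"
    "log"

    "github.com/moby/buildkit/executor/oci"
)

func main() {
    uid, gid, err := oci.ParseUser("1000:2000")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(uid, gid) // 1000 2000

    uid, gid, _ = oci.ParseUser("1000") // gid defaults to the uid
    fmt.Println(uid, gid)               // 1000 1000
}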


@ -0,0 +1,249 @@
package runcexecutor
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/containerd/containerd/contrib/seccomp"
"github.com/containerd/containerd/mount"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
runc "github.com/containerd/go-runc"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/libcontainer_specconv"
"github.com/moby/buildkit/util/system"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type Opt struct {
// Root is the state root directory
Root string
CommandCandidates []string
// Rootless runs the executor without root privileges (unrelated to the Root directory above)
Rootless bool
}
var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
type runcExecutor struct {
runc *runc.Runc
root string
cmd string
rootless bool
}
func New(opt Opt) (executor.Executor, error) {
cmds := opt.CommandCandidates
if cmds == nil {
cmds = defaultCommandCandidates
}
var cmd string
var found bool
for _, cmd = range cmds {
if _, err := exec.LookPath(cmd); err == nil {
found = true
break
}
}
if !found {
return nil, errors.Errorf("failed to find %s binary", cmd)
}
root := opt.Root
if err := os.MkdirAll(root, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create %s", root)
}
root, err := filepath.Abs(root)
if err != nil {
return nil, err
}
root, err = filepath.EvalSymlinks(root)
if err != nil {
return nil, err
}
runtime := &runc.Runc{
Command: cmd,
Log: filepath.Join(root, "runc-log.json"),
LogFormat: runc.JSON,
PdeathSignal: syscall.SIGKILL,
Setpgid: true,
}
w := &runcExecutor{
runc: runtime,
root: root,
rootless: opt.Rootless,
}
return w, nil
}
func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
resolvConf, err := oci.GetResolvConf(ctx, w.root)
if err != nil {
return err
}
hostsFile, err := oci.GetHostsFile(ctx, w.root)
if err != nil {
return err
}
mountable, err := root.Mount(ctx, false)
if err != nil {
return err
}
rootMount, err := mountable.Mount()
if err != nil {
return err
}
defer mountable.Release()
id := identity.NewID()
bundle := filepath.Join(w.root, id)
if err := os.Mkdir(bundle, 0700); err != nil {
return err
}
defer os.RemoveAll(bundle)
rootFSPath := filepath.Join(bundle, "rootfs")
if err := os.Mkdir(rootFSPath, 0700); err != nil {
return err
}
if err := mount.All(rootMount, rootFSPath); err != nil {
return err
}
defer mount.Unmount(rootFSPath, 0)
uid, gid, err := oci.GetUser(ctx, rootFSPath, meta.User)
if err != nil {
return err
}
f, err := os.Create(filepath.Join(bundle, "config.json"))
if err != nil {
return err
}
defer f.Close()
opts := []containerdoci.SpecOpts{containerdoci.WithUIDGID(uid, gid)}
if system.SeccompSupported() {
opts = append(opts, seccomp.WithDefaultProfile())
}
if meta.ReadonlyRootFS {
opts = append(opts, containerdoci.WithRootFSReadonly())
}
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, opts...)
if err != nil {
return err
}
defer cleanup()
spec.Root.Path = rootFSPath
if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type
spec.Root.Readonly = true
}
newp, err := fs.RootPath(rootFSPath, meta.Cwd)
if err != nil {
return errors.Wrapf(err, "working dir %s points to invalid target", newp)
}
if err := os.MkdirAll(newp, 0700); err != nil {
return errors.Wrapf(err, "failed to create working directory %s", newp)
}
if w.rootless {
specconv.ToRootless(spec, &specconv.RootlessOpts{
MapSubUIDGID: true,
})
// TODO(AkihiroSuda): keep Cgroups enabled if /sys/fs/cgroup/cpuset/buildkit exists and writable
spec.Linux.CgroupsPath = ""
// TODO(AkihiroSuda): ToRootless removes netns, but we should re-add netns here
// if either SUID or userspace NAT is configured on the host.
if err := setOOMScoreAdj(spec); err != nil {
return err
}
}
if err := json.NewEncoder(f).Encode(spec); err != nil {
return err
}
logrus.Debugf("> running %s %v", id, meta.Args)
status, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{
IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr},
})
logrus.Debugf("< completed %s %v %v", id, status, err)
if status != 0 {
select {
case <-ctx.Done():
// runc can't report context.Cancelled directly
return errors.Wrapf(ctx.Err(), "exit code %d", status)
default:
}
return errors.Errorf("exit code %d", status)
}
return err
}
type forwardIO struct {
stdin io.ReadCloser
stdout, stderr io.WriteCloser
}
func (s *forwardIO) Close() error {
return nil
}
func (s *forwardIO) Set(cmd *exec.Cmd) {
cmd.Stdin = s.stdin
cmd.Stdout = s.stdout
cmd.Stderr = s.stderr
}
func (s *forwardIO) Stdin() io.WriteCloser {
return nil
}
func (s *forwardIO) Stdout() io.ReadCloser {
return nil
}
func (s *forwardIO) Stderr() io.ReadCloser {
return nil
}
// setOOMScoreAdj comes from https://github.com/genuinetools/img/blob/2fabe60b7dc4623aa392b515e013bbc69ad510ab/executor/runc/executor.go#L182-L192
func setOOMScoreAdj(spec *specs.Spec) error {
// Set the oom_score_adj of our children containers to that of the current process.
b, err := ioutil.ReadFile("/proc/self/oom_score_adj")
if err != nil {
return errors.Wrap(err, "failed to read /proc/self/oom_score_adj")
}
s := strings.TrimSpace(string(b))
oom, err := strconv.Atoi(s)
if err != nil {
return errors.Wrapf(err, "failed to parse %s as int", s)
}
spec.Process.OOMScoreAdj = &oom
return nil
}
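
New is the only public entry point of this executor. A minimal construction sketch, assuming the conventional import path for this vendored package and an arbitrary state directory:

package main

import (
    "log"

    "github.com/moby/buildkit/executor/runcexecutor" // assumed import path
)

func main() {
    // New resolves the runc binary from CommandCandidates (default:
    // buildkit-runc, then runc) and normalizes Root to an absolute,
    // symlink-free path; the directory below is an assumption
    e, err := runcexecutor.New(runcexecutor.Opt{
        Root: "/var/lib/buildkit/executor",
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = e // a worker would call e.Exec once per LLB exec vertex
}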

vendor/github.com/moby/buildkit/exporter/exporter.go generated vendored Normal file

@ -0,0 +1,16 @@
package exporter
import (
"context"
"github.com/moby/buildkit/cache"
)
type Exporter interface {
Resolve(context.Context, map[string]string) (ExporterInstance, error)
}
type ExporterInstance interface {
Name() string
Export(context.Context, cache.ImmutableRef, map[string][]byte) (map[string]string, error)
}
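
Anything satisfying these two interfaces can be wired into the controller. A hypothetical no-op implementation illustrates the Resolve/Export split; discardExporter is not part of this diff:

package main

import (
    "context"
    "log"

    "github.com/moby/buildkit/cache"
    "github.com/moby/buildkit/exporter"
)

// discardExporter resolves instances that drop the build result and
// simply echo the requested attrs back as the exporter response.
type discardExporter struct{}

func (discardExporter) Resolve(ctx context.Context, attrs map[string]string) (exporter.ExporterInstance, error) {
    return discardInstance{attrs: attrs}, nil
}

type discardInstance struct{ attrs map[string]string }

func (discardInstance) Name() string { return "discard" }

func (d discardInstance) Export(ctx context.Context, ref cache.ImmutableRef, meta map[string][]byte) (map[string]string, error) {
    log.Printf("discarding export with %d metadata keys", len(meta))
    return d.attrs, nil
}

var _ exporter.Exporter = discardExporter{}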


@ -0,0 +1,276 @@
package builder
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"regexp"
"strings"
"github.com/docker/docker/builder/dockerignore"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const (
LocalNameContext = "context"
LocalNameDockerfile = "dockerfile"
keyTarget = "target"
keyFilename = "filename"
keyCacheFrom = "cache-from"
exporterImageConfig = "containerimage.config"
defaultDockerfileName = "Dockerfile"
dockerignoreFilename = ".dockerignore"
buildArgPrefix = "build-arg:"
labelPrefix = "label:"
keyNoCache = "no-cache"
)
var httpPrefix = regexp.MustCompile("^https?://")
var gitUrlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
func Build(ctx context.Context, c client.Client) error {
opts := c.Opts()
filename := opts[keyFilename]
if filename == "" {
filename = defaultDockerfileName
}
var ignoreCache []string
if v, ok := opts[keyNoCache]; ok {
if v == "" {
ignoreCache = []string{} // means all stages
} else {
ignoreCache = strings.Split(v, ",")
}
}
src := llb.Local(LocalNameDockerfile,
llb.IncludePatterns([]string{filename}),
llb.SessionID(c.SessionID()),
llb.SharedKeyHint(defaultDockerfileName),
)
var buildContext *llb.State
isScratchContext := false
if st, ok := detectGitContext(opts[LocalNameContext]); ok {
src = *st
buildContext = &src
} else if httpPrefix.MatchString(opts[LocalNameContext]) {
httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"))
def, err := httpContext.Marshal()
if err != nil {
return err
}
ref, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dt, err := ref.ReadFile(ctx, client.ReadRequest{
Filename: "context",
Range: &client.FileRange{
Length: 1024,
},
})
if err != nil {
return err
}
if isArchive(dt) {
unpack := llb.Image(dockerfile2llb.CopyImage).
Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS())
unpack.AddMount("/src", httpContext, llb.Readonly)
src = unpack.AddMount("/out", llb.Scratch())
buildContext = &src
} else {
filename = "context"
src = httpContext
buildContext = &src
isScratchContext = true
}
}
def, err := src.Marshal()
if err != nil {
return err
}
eg, ctx2 := errgroup.WithContext(ctx)
var dtDockerfile []byte
eg.Go(func() error {
ref, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{
Filename: filename,
})
if err != nil {
return err
}
return nil
})
var excludes []string
if !isScratchContext {
eg.Go(func() error {
dockerignoreState := buildContext
if dockerignoreState == nil {
st := llb.Local(LocalNameContext,
llb.SessionID(c.SessionID()),
llb.IncludePatterns([]string{dockerignoreFilename}),
llb.SharedKeyHint(dockerignoreFilename),
)
dockerignoreState = &st
}
def, err := dockerignoreState.Marshal()
if err != nil {
return err
}
ref, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{
Filename: dockerignoreFilename,
})
if err == nil {
excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore))
if err != nil {
return errors.Wrap(err, "failed to parse dockerignore")
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
if _, ok := c.Opts()["cmdline"]; !ok {
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
if ok {
return forwardGateway(ctx, c, ref, cmdline)
}
}
st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
Target: opts[keyTarget],
MetaResolver: c,
BuildArgs: filter(opts, buildArgPrefix),
Labels: filter(opts, labelPrefix),
SessionID: c.SessionID(),
BuildContext: buildContext,
Excludes: excludes,
IgnoreCache: ignoreCache,
})
if err != nil {
return err
}
def, err = st.Marshal()
if err != nil {
return err
}
config, err := json.Marshal(img)
if err != nil {
return err
}
var cacheFrom []string
if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" {
cacheFrom = strings.Split(cacheFromStr, ",")
}
_, err = c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
ImportCacheRefs: cacheFrom,
}, map[string][]byte{
exporterImageConfig: config,
}, true)
if err != nil {
return err
}
return nil
}
func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) error {
opts := c.Opts()
if opts == nil {
opts = map[string]string{}
}
opts["cmdline"] = cmdline
opts["source"] = ref
_, err := c.Solve(ctx, client.SolveRequest{
Frontend: "gateway.v0",
FrontendOpt: opts,
}, nil, true)
return err
}
func filter(opt map[string]string, key string) map[string]string {
m := map[string]string{}
for k, v := range opt {
if strings.HasPrefix(k, key) {
m[strings.TrimPrefix(k, key)] = v
}
}
return m
}
func detectGitContext(ref string) (*llb.State, bool) {
found := false
if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) {
found = true
}
for _, prefix := range []string{"git://", "github.com/", "git@"} {
if strings.HasPrefix(ref, prefix) {
found = true
break
}
}
if !found {
return nil, false
}
parts := strings.SplitN(ref, "#", 2)
branch := ""
if len(parts) > 1 {
branch = parts[1]
}
st := llb.Git(parts[0], branch)
return &st, true
}
func isArchive(header []byte) bool {
for _, m := range [][]byte{
{0x42, 0x5A, 0x68}, // bzip2
{0x1F, 0x8B, 0x08}, // gzip
{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
} {
if len(header) < len(m) {
continue
}
if bytes.Equal(m, header[:len(m)]) {
return true
}
}
r := tar.NewReader(bytes.NewBuffer(header))
_, err := r.Next()
return err == nil
}
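
A hypothetical in-package example (a _test.go file, since both helpers are unexported) of the two context-detection paths: gzip output begins with the 0x1F 0x8B 0x08 magic that isArchive checks, and a .git#fragment URL satisfies detectGitContext:

package builder

import (
    "bytes"
    "compress/gzip"
    "fmt"
)

func ExampleDetectContext() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    _, _ = zw.Write([]byte("hello"))
    _ = zw.Close()

    fmt.Println(isArchive(buf.Bytes()))     // true: gzip magic
    fmt.Println(isArchive([]byte("hello"))) // false: not an archive

    _, ok := detectGitContext("https://example.com/repo.git#mybranch")
    fmt.Println(ok) // true: https URL with a .git path suffix
}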

Some files were not shown because too many files have changed in this diff.