builder: adapter update after vendor update

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 2018-05-14 11:05:49 -07:00
parent 46bd229b51
commit b7424599f6
8 changed files with 252 additions and 486 deletions


@@ -242,11 +242,11 @@ func (p *puller) resolve(ctx context.Context) error {
return p.resolveErr
}
func (p *puller) CacheKey(ctx context.Context) (string, error) {
func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
if err := p.resolve(ctx); err != nil {
return "", err
return "", false, err
}
return p.cacheKey.String(), nil
return p.cacheKey.String(), true, nil
}
func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
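
For reference, a minimal, self-contained sketch of how a caller might consume the new CacheKey signature shown above; the interface name, the meaning of the extra bool (taken here to mean "the key is final"), and the sample key are illustrative assumptions, not BuildKit's actual definitions.

package main

import (
	"context"
	"fmt"
)

// cacheKeyer is a hypothetical, trimmed-down version of the source-instance
// interface implied by the diff: CacheKey now takes an index and also reports
// (assumed here) whether the returned key is final.
type cacheKeyer interface {
	CacheKey(ctx context.Context, index int) (key string, done bool, err error)
}

type resolvedPuller struct{ key string }

func (p *resolvedPuller) CacheKey(ctx context.Context, index int) (string, bool, error) {
	// Mirrors the puller above: once resolved, return the key and true.
	return p.key, true, nil
}

func main() {
	var src cacheKeyer = &resolvedPuller{key: "example-image-cache-key"}
	key, done, err := src.CacheKey(context.Background(), 0)
	fmt.Println(key, done, err)
}
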


@@ -219,7 +219,7 @@ func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, err
return inf, nil
}
func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.MountFactory, error) {
func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountable, error) {
l, err := s.getLayer(key)
if err != nil {
return nil, err
@@ -239,7 +239,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.MountFac
Type: "bind",
Options: []string{"rbind"},
}}
return &constMountFactory{
return &constMountable{
mounts: mnt,
release: func() error {
_, err := s.opt.LayerStore.ReleaseRWLayer(rwlayer)
@@ -259,7 +259,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.MountFac
Type: "bind",
Options: []string{"rbind"},
}}
return &constMountFactory{
return &constMountable{
mounts: mnt,
release: func() error {
return s.opt.GraphDriver.Put(id)
@@ -314,70 +314,14 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
}); err != nil {
return err
}
// logrus.Debugf("committed %s as %s", name, key));
return nil
}
func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (snapshot.MountFactory, error) {
func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (snapshot.Mountable, error) {
return s.Mounts(ctx, parent)
}
func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
// allKeys := map[string]struct{}{}
// commitedIDs := map[string]string{}
// chainIDs := map[string]layer.ChainID{}
//
// if err := s.db.View(func(tx *bolt.Tx) error {
// tx.ForEach(func(name []byte, b *bolt.Bucket) error {
// allKeys[string(name)] = struct{}{}
// v := b.Get(keyCommitted)
// if v != nil {
// commitedIDs[string(v)] = string(name)
// }
//
// v = b.Get(keyChainID)
// if v != nil {
// logrus.Debugf("loaded layer %s %s", name, v)
// chainIDs[string(name)] = layer.ChainID(v)
// }
// return nil
// })
// return nil
// }); err != nil {
// return err
// }
//
// for k := range allKeys {
// if chainID, ok := chainIDs[k]; ok {
// s.mu.Lock()
// if _, ok := s.refs[k]; !ok {
// l, err := s.opt.LayerStore.Get(chainID)
// if err != nil {
// s.mu.Unlock()
// return err
// }
// s.refs[k] = l
// }
// s.mu.Unlock()
// }
// if _, ok := commitedIDs[k]; ok {
// continue
// }
//
// if _, err := s.getLayer(k); err != nil {
// s.Remove(ctx, k)
// continue
// }
// info, err := s.Stat(ctx, k)
// if err != nil {
// s.Remove(ctx, k)
// continue
// }
// if err := fn(ctx, info); err != nil {
// return err
// }
// }
return errors.Errorf("not-implemented")
}
@@ -394,15 +338,18 @@ func (s *snapshotter) Close() error {
return s.db.Close()
}
type constMountFactory struct {
type constMountable struct {
mounts []mount.Mount
release func() error
}
func (mf *constMountFactory) Mount() ([]mount.Mount, func() error, error) {
release := mf.release
if release == nil {
release = func() error { return nil }
}
return mf.mounts, release, nil
func (m *constMountable) Mount() ([]mount.Mount, error) {
return m.mounts, nil
}
func (m *constMountable) Release() error {
if m.release == nil {
return nil
}
return m.release()
}
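
For reference, a self-contained sketch of the Mountable shape that constMountable now satisfies, with Release split out of the old MountFactory-style mount callback. The Mount struct below is a local stand-in for containerd's mount.Mount, and the interface is an assumption drawn from this diff rather than BuildKit's exact definition.

package main

import "fmt"

// Mount is a stand-in for mount.Mount, kept local so the sketch compiles alone.
type Mount struct {
	Type    string
	Source  string
	Options []string
}

// Mountable is the assumed interface: Mount returns the mounts, and Release
// is now an explicit, separate call.
type Mountable interface {
	Mount() ([]Mount, error)
	Release() error
}

type constMountable struct {
	mounts  []Mount
	release func() error
}

func (m *constMountable) Mount() ([]Mount, error) { return m.mounts, nil }

func (m *constMountable) Release() error {
	if m.release == nil {
		return nil
	}
	return m.release()
}

func main() {
	var mnt Mountable = &constMountable{
		mounts: []Mount{{Type: "bind", Source: "/tmp/example", Options: []string{"rbind"}}},
	}
	mounts, _ := mnt.Mount()
	defer mnt.Release()
	fmt.Println(len(mounts), "mount(s)")
}
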


@@ -4,53 +4,24 @@ import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/builder-next/containerimage"
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
"github.com/docker/docker/builder/builder-next/snapshot"
mobyworker "github.com/docker/docker/builder/builder-next/worker"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/pkg/jsonmessage"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/cacheimport"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/executor/runcexecutor"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/dockerfile"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/solver-next/boltdbcachestorage"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
netcontext "golang.org/x/net/context"
"golang.org/x/sync/errgroup"
grpcmetadata "google.golang.org/grpc/metadata"
)
// Builder defines interface for running a build
// type Builder interface {
// Build(context.Context, backend.BuildConfig) (*builder.Result, error)
// }
// Result is the output produced by a Builder
// type Result struct {
// ImageID string
// // FromImage Image
// }
type Opt struct {
SessionManager *session.Manager
Root string
@@ -59,22 +30,18 @@ type Opt struct {
type Builder struct {
controller *control.Controller
results *results
mu sync.Mutex
jobs map[string]func()
}
func New(opt Opt) (*Builder, error) {
results := newResultsGetter()
c, err := newController(opt, results.ch)
c, err := newController(opt)
if err != nil {
return nil, err
}
b := &Builder{
controller: c,
results: results,
jobs: map[string]func(){},
}
return b, nil
@@ -90,21 +57,10 @@ func (b *Builder) Cancel(ctx context.Context, id string) error {
}
func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
if buildID := opt.Options.BuildID; buildID != "" {
b.mu.Lock()
ctx, b.jobs[buildID] = context.WithCancel(ctx)
b.mu.Unlock()
defer func() {
delete(b.jobs, buildID)
}()
}
var out builder.Result
id := identity.NewID()
attrs := map[string]string{
"ref": id,
}
frontendAttrs := map[string]string{}
if opt.Options.Target != "" {
@@ -119,11 +75,11 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
frontendAttrs["context"] = opt.Options.RemoteContext
}
if len(opt.Options.CacheFrom) > 0 {
frontendAttrs["cache-from"] = opt.Options.CacheFrom[0]
var cacheFrom []string
for _, v := range opt.Options.CacheFrom {
cacheFrom = append(cacheFrom, v)
}
logrus.Debugf("frontend: %+v", frontendAttrs)
frontendAttrs["cache-from"] = strings.Join(cacheFrom, ",")
for k, v := range opt.Options.BuildArgs {
if v == nil {
@@ -132,10 +88,17 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
frontendAttrs["build-arg:"+k] = *v
}
for k, v := range opt.Options.Labels {
frontendAttrs["label:"+k] = v
}
if opt.Options.NoCache {
frontendAttrs["no-cache"] = ""
}
req := &controlapi.SolveRequest{
Ref: id,
Exporter: "image",
ExporterAttrs: attrs,
Exporter: "moby",
Frontend: "dockerfile.v0",
FrontendAttrs: frontendAttrs,
Session: opt.Options.SessionID,
@@ -144,8 +107,16 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
_, err := b.controller.Solve(ctx, req)
return err
resp, err := b.controller.Solve(ctx, req)
if err != nil {
return err
}
id, ok := resp.ExporterResponse["containerimage.digest"]
if !ok {
return errors.Errorf("missing image id")
}
out.ImageID = id
return nil
})
ch := make(chan *controlapi.StatusResponse)
@@ -186,173 +157,11 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
return nil
})
out := &builder.Result{}
eg.Go(func() error {
res, err := b.results.wait(ctx, id)
if err != nil {
return err
}
out.ImageID = string(res.ID)
return nil
})
if err := eg.Wait(); err != nil {
return nil, err
}
return out, nil
}
func newController(opt Opt, reporter chan containerimageexp.Result) (*control.Controller, error) {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
return nil, err
}
dist := opt.Dist
root := opt.Root
var driver graphdriver.Driver
if ls, ok := dist.LayerStore.(interface {
Driver() graphdriver.Driver
}); ok {
driver = ls.Driver()
} else {
return nil, errors.Errorf("could not access graphdriver")
}
sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
})
if err != nil {
return nil, err
}
store, err := local.NewStore(filepath.Join(root, "content"))
if err != nil {
return nil, err
}
store = &contentStoreNoLabels{store}
md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
if err != nil {
return nil, err
}
snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{
Content: store,
Snapshotter: sbase,
MetadataStore: md,
})
cm, err := cache.NewManager(cache.ManagerOpt{
Snapshotter: snapshotter,
MetadataStore: md,
})
if err != nil {
return nil, err
}
src, err := containerimage.NewSource(containerimage.SourceOpt{
SessionManager: opt.SessionManager,
CacheAccessor: cm,
ContentStore: store,
DownloadManager: dist.DownloadManager,
MetadataStore: dist.V2MetadataService,
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
})
if err != nil {
return nil, err
}
exec, err := runcexecutor.New(runcexecutor.Opt{
Root: filepath.Join(root, "executor"),
CommandCandidates: []string{"docker-runc", "runc"},
})
if err != nil {
return nil, err
}
differ, ok := sbase.(containerimageexp.Differ)
if !ok {
return nil, errors.Errorf("snapshotter doesn't support differ")
}
exp, err := containerimageexp.New(containerimageexp.Opt{
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
Differ: differ,
Reporter: reporter,
})
if err != nil {
return nil, err
}
cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
if err != nil {
return nil, err
}
frontends := map[string]frontend.Frontend{}
frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
// frontends["gateway.v0"] = gateway.NewGatewayFrontend()
// mdb := ctdmetadata.NewDB(db, c, map[string]ctdsnapshot.Snapshotter{
// "moby": s,
// })
// if err := mdb.Init(context.TODO()); err != nil {
// return opt, err
// }
//
// throttledGC := throttle.Throttle(time.Second, func() {
// if _, err := mdb.GarbageCollect(context.TODO()); err != nil {
// logrus.Errorf("GC error: %+v", err)
// }
// })
//
// gc := func(ctx context.Context) error {
// throttledGC()
// return nil
// }
wopt := mobyworker.WorkerOpt{
ID: "moby",
SessionManager: opt.SessionManager,
MetadataStore: md,
ContentStore: store,
CacheManager: cm,
Snapshotter: snapshotter,
Executor: exec,
ImageSource: src,
DownloadManager: dist.DownloadManager,
V2MetadataService: dist.V2MetadataService,
Exporters: map[string]exporter.Exporter{
"image": exp,
},
}
wc := &worker.Controller{}
w, err := mobyworker.NewWorker(wopt)
if err != nil {
return nil, err
}
wc.Add(w)
ci := cacheimport.NewCacheImporter(cacheimport.ImportOpt{
Worker: w,
SessionManager: opt.SessionManager,
})
return control.NewController(control.Opt{
SessionManager: opt.SessionManager,
WorkerController: wc,
Frontends: frontends,
CacheKeyStorage: cacheStorage,
// CacheExporter: ce,
CacheImporter: ci,
})
return &out, nil
}
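
With the results-getter removed, Build now reads the image ID out of the solve response itself. A compact sketch of that flow follows; SolveResponse, the controller interface, and the digest value are simplified stand-ins for the real controlapi and controller types, assumed here for illustration.

package main

import (
	"context"
	"errors"
	"fmt"
)

// SolveResponse stands in for the controlapi solve response: the exporter's
// string map is surfaced as ExporterResponse.
type SolveResponse struct {
	ExporterResponse map[string]string
}

type controller interface {
	Solve(ctx context.Context) (*SolveResponse, error)
}

type fakeController struct{}

func (fakeController) Solve(ctx context.Context) (*SolveResponse, error) {
	return &SolveResponse{ExporterResponse: map[string]string{
		"containerimage.digest": "sha256:0123456789abcdef", // placeholder digest
	}}, nil
}

// imageIDFromSolve mirrors the pattern in the diff: fail if the exporter did
// not report a digest, otherwise hand it back as the build's image ID.
func imageIDFromSolve(ctx context.Context, c controller) (string, error) {
	resp, err := c.Solve(ctx)
	if err != nil {
		return "", err
	}
	id, ok := resp.ExporterResponse["containerimage.digest"]
	if !ok {
		return "", errors.New("missing image id")
	}
	return id, nil
}

func main() {
	id, err := imageIDFromSolve(context.Background(), fakeController{})
	fmt.Println(id, err)
}
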
type statusProxy struct {
@@ -388,61 +197,6 @@ func (sp *statusProxy) RecvMsg(m interface{}) error {
return io.EOF
}
type results struct {
ch chan containerimageexp.Result
res map[string]containerimageexp.Result
mu sync.Mutex
cond *sync.Cond
}
func newResultsGetter() *results {
r := &results{
ch: make(chan containerimageexp.Result),
res: map[string]containerimageexp.Result{},
}
r.cond = sync.NewCond(&r.mu)
go func() {
for res := range r.ch {
r.mu.Lock()
r.res[res.Ref] = res
r.cond.Broadcast()
r.mu.Unlock()
}
}()
return r
}
func (r *results) wait(ctx context.Context, ref string) (*containerimageexp.Result, error) {
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-ctx.Done():
r.mu.Lock()
r.cond.Broadcast()
r.mu.Unlock()
case <-done:
}
}()
r.mu.Lock()
for {
select {
case <-ctx.Done():
r.mu.Unlock()
return nil, ctx.Err()
default:
}
res, ok := r.res[ref]
if ok {
r.mu.Unlock()
return &res, nil
}
r.cond.Wait()
}
}
type contentStoreNoLabels struct {
content.Store
}
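
The jobs map registered in Build above presumably backs Cancel, whose body is not shown in this diff. A self-contained sketch of that per-build cancellation pattern, under that assumption; the registry type and method names are invented for illustration.

package main

import (
	"context"
	"fmt"
	"sync"
)

// jobRegistry is a minimal stand-in for the Builder's jobs map plus mutex.
type jobRegistry struct {
	mu   sync.Mutex
	jobs map[string]func()
}

// register derives a cancellable context for a build and records its cancel
// function under the build ID; the returned cleanup removes the entry.
func (r *jobRegistry) register(ctx context.Context, id string) (context.Context, func()) {
	ctx, cancel := context.WithCancel(ctx)
	r.mu.Lock()
	r.jobs[id] = cancel
	r.mu.Unlock()
	return ctx, func() {
		r.mu.Lock()
		delete(r.jobs, id)
		r.mu.Unlock()
	}
}

// cancel looks up the registered cancel function and invokes it, which cancels
// the context handed to the in-flight solve.
func (r *jobRegistry) cancel(id string) {
	r.mu.Lock()
	cancel, ok := r.jobs[id]
	r.mu.Unlock()
	if ok {
		cancel()
	}
}

func main() {
	r := &jobRegistry{jobs: map[string]func(){}}
	ctx, done := r.register(context.Background(), "build-1")
	defer done()
	r.cancel("build-1")
	fmt.Println("cancelled:", ctx.Err() != nil)
}
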


@@ -0,0 +1,158 @@
package buildkit
import (
"os"
"path/filepath"
"github.com/containerd/containerd/content/local"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
mobyworker "github.com/docker/docker/builder/builder-next/worker"
"github.com/docker/docker/daemon/graphdriver"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/executor/runcexecutor"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/dockerfile"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/solver/boltdbcachestorage"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
)
func newController(opt Opt) (*control.Controller, error) {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
return nil, err
}
dist := opt.Dist
root := opt.Root
var driver graphdriver.Driver
if ls, ok := dist.LayerStore.(interface {
Driver() graphdriver.Driver
}); ok {
driver = ls.Driver()
} else {
return nil, errors.Errorf("could not access graphdriver")
}
sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
})
if err != nil {
return nil, err
}
store, err := local.NewStore(filepath.Join(root, "content"))
if err != nil {
return nil, err
}
store = &contentStoreNoLabels{store}
md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
if err != nil {
return nil, err
}
snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{
Content: store,
Snapshotter: sbase,
MetadataStore: md,
})
cm, err := cache.NewManager(cache.ManagerOpt{
Snapshotter: snapshotter,
MetadataStore: md,
})
if err != nil {
return nil, err
}
src, err := containerimage.NewSource(containerimage.SourceOpt{
SessionManager: opt.SessionManager,
CacheAccessor: cm,
ContentStore: store,
DownloadManager: dist.DownloadManager,
MetadataStore: dist.V2MetadataService,
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
})
if err != nil {
return nil, err
}
exec, err := runcexecutor.New(runcexecutor.Opt{
Root: filepath.Join(root, "executor"),
CommandCandidates: []string{"docker-runc", "runc"},
})
if err != nil {
return nil, err
}
differ, ok := sbase.(containerimageexp.Differ)
if !ok {
return nil, errors.Errorf("snapshotter doesn't support differ")
}
exp, err := containerimageexp.New(containerimageexp.Opt{
ImageStore: dist.ImageStore,
ReferenceStore: dist.ReferenceStore,
Differ: differ,
})
if err != nil {
return nil, err
}
cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
if err != nil {
return nil, err
}
frontends := map[string]frontend.Frontend{}
frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
// frontends["gateway.v0"] = gateway.NewGatewayFrontend()
wopt := mobyworker.WorkerOpt{
ID: "moby",
SessionManager: opt.SessionManager,
MetadataStore: md,
ContentStore: store,
CacheManager: cm,
Snapshotter: snapshotter,
Executor: exec,
ImageSource: src,
DownloadManager: dist.DownloadManager,
V2MetadataService: dist.V2MetadataService,
Exporters: map[string]exporter.Exporter{
"moby": exp,
},
}
wc := &worker.Controller{}
w, err := mobyworker.NewWorker(wopt)
if err != nil {
return nil, err
}
wc.Add(w)
ci := remotecache.NewCacheImporter(remotecache.ImportOpt{
Worker: w,
SessionManager: opt.SessionManager,
})
return control.NewController(control.Opt{
SessionManager: opt.SessionManager,
WorkerController: wc,
Frontends: frontends,
CacheKeyStorage: cacheStorage,
// CacheExporter: ce,
CacheImporter: ci,
})
}


@@ -23,17 +23,10 @@ type Differ interface {
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// TODO: this needs to be handled differently (return from solve)
type Result struct {
Ref string
ID image.ID
}
type Opt struct {
ImageStore image.Store
ReferenceStore reference.Store
Differ Differ
Reporter chan Result
}
type imageExporter struct {
@@ -57,8 +50,6 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
i.targetName = ref
case exporterImageConfig:
i.config = []byte(v)
case "ref":
i.ref = v
default:
logrus.Warnf("image exporter: unknown option %s", k)
}
@@ -70,14 +61,13 @@ type imageExporterInstance struct {
*imageExporter
targetName distref.Named
config []byte
ref string
}
func (e *imageExporterInstance) Name() string {
return "exporting to image"
}
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) error {
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) (map[string]string, error) {
if config, ok := opt[exporterImageConfig]; ok {
e.config = config
}
@@ -86,12 +76,12 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
layersDone := oneOffProgress(ctx, "exporting layers")
if err := ref.Finalize(ctx); err != nil {
return err
return nil, err
}
diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
if err != nil {
return err
return nil, err
}
diffs := make([]digest.Digest, len(diffIDs))
@@ -105,20 +95,20 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
var err error
config, err = emptyImageConfig()
if err != nil {
return err
return nil, err
}
}
history, err := parseHistoryFromConfig(config)
if err != nil {
return err
return nil, err
}
diffs, history = normalizeLayersAndHistory(diffs, history, ref)
config, err = patchImageConfig(config, diffs, history)
if err != nil {
return err
return nil, err
}
configDigest := digest.FromBytes(config)
@@ -126,7 +116,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest))
id, err := e.opt.ImageStore.Create(config)
if err != nil {
return configDone(err)
return nil, configDone(err)
}
configDone(nil)
@@ -135,15 +125,13 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
tagDone := oneOffProgress(ctx, "naming to "+e.targetName.String())
if err := e.opt.ReferenceStore.AddTag(e.targetName, digest.Digest(id), true); err != nil {
return tagDone(err)
return nil, tagDone(err)
}
tagDone(nil)
}
}
if e.opt.Reporter != nil {
e.opt.Reporter <- Result{ID: id, Ref: e.ref}
}
return nil
return map[string]string{
"containerimage.digest": id.String(),
}, nil
}
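
Export now returns a string map instead of pushing results through the removed Reporter channel; the controller exposes that map as ExporterResponse, which is where Build picks up "containerimage.digest". A minimal sketch with simplified stand-in types (the real signature takes a cache.ImmutableRef and exporter options, not a plain string), assumed here only to show the shape of the round trip.

package main

import (
	"context"
	"fmt"
)

// exporterInstance is an illustrative stand-in for the exporter contract
// implied by the diff: Export returns a response map instead of only an error.
type exporterInstance interface {
	Export(ctx context.Context, ref string, opt map[string][]byte) (map[string]string, error)
}

type mobyExporter struct{}

func (e *mobyExporter) Export(ctx context.Context, ref string, opt map[string][]byte) (map[string]string, error) {
	imageID := "sha256:0123456789abcdef" // placeholder for the created image ID
	return map[string]string{"containerimage.digest": imageID}, nil
}

func main() {
	var e exporterInstance = &mobyExporter{}
	resp, err := e.Export(context.Background(), "rootfs-ref", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp["containerimage.digest"])
}
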


@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"runtime"
"strings"
"time"
"github.com/moby/buildkit/cache"
@ -16,9 +15,9 @@ import (
"github.com/sirupsen/logrus"
)
const (
emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1")
)
// const (
// emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1")
// )
func emptyImageConfig() ([]byte, error) {
img := ocispec.Image{
@@ -65,18 +64,26 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History)
}
m["history"] = dt
// now := time.Now()
// dt, err = json.Marshal(&now)
// if err != nil {
// return nil, errors.Wrap(err, "failed to marshal creation time")
// }
// m["created"] = dt
if _, ok := m["created"]; !ok {
var tm *time.Time
for _, h := range history {
if h.Created != nil {
tm = h.Created
}
}
dt, err = json.Marshal(&tm)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal creation time")
}
m["created"] = dt
}
dt, err = json.Marshal(m)
return dt, errors.Wrap(err, "failed to marshal config after patch")
}
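
A self-contained worked example of the "created" backfill added above: when the config has no created field, take the creation time of the last history entry that carries one. History here is a local stand-in for ocispec.History, and backfillCreated is a hypothetical helper name, not the function in this file.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// History is a minimal stand-in for ocispec.History.
type History struct {
	Created *time.Time `json:"created,omitempty"`
}

// backfillCreated sets m["created"] from the last non-nil history timestamp
// if the config map does not already have a created field.
func backfillCreated(m map[string]json.RawMessage, history []History) error {
	if _, ok := m["created"]; ok {
		return nil
	}
	var tm *time.Time
	for _, h := range history {
		if h.Created != nil {
			tm = h.Created
		}
	}
	dt, err := json.Marshal(&tm)
	if err != nil {
		return err
	}
	m["created"] = dt
	return nil
}

func main() {
	t := time.Date(2018, 5, 14, 11, 5, 49, 0, time.UTC)
	m := map[string]json.RawMessage{}
	_ = backfillCreated(m, []History{{Created: &t}})
	fmt.Println(string(m["created"])) // "2018-05-14T11:05:49Z"
}
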
func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History, ref cache.ImmutableRef) ([]digest.Digest, []ocispec.History) {
refMeta := getRefMetadata(ref, len(diffs))
var historyLayers int
for _, h := range history {
if !h.EmptyLayer {
@@ -103,11 +110,10 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
if len(diffs) > historyLayers {
// some history items are missing. add them based on the ref metadata
for _, msg := range getRefDesciptions(ref, len(diffs)-historyLayers) {
// tm := time.Now().UTC()
for _, md := range refMeta[historyLayers:] {
history = append(history, ocispec.History{
// Created: &tm,
CreatedBy: msg,
Created: &md.createdAt,
CreatedBy: md.description,
Comment: "buildkit.exporter.image.v0",
})
}
@@ -129,23 +135,31 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
return diffs, history
}
func getRefDesciptions(ref cache.ImmutableRef, limit int) []string {
type refMetadata struct {
description string
createdAt time.Time
}
func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
if limit <= 0 {
return nil
}
defaultMsg := "created by buildkit" // shouldn't happen but don't fail build
meta := refMetadata{
description: "created by buildkit", // shouldn't be shown but don't fail build
createdAt: time.Now(),
}
if ref == nil {
strings.Repeat(defaultMsg, limit)
return append(getRefMetadata(nil, limit-1), meta)
}
descr := cache.GetDescription(ref.Metadata())
if descr == "" {
descr = defaultMsg
if descr := cache.GetDescription(ref.Metadata()); descr != "" {
meta.description = descr
}
meta.createdAt = cache.GetCreatedAt(ref.Metadata())
p := ref.Parent()
if p != nil {
defer p.Release(context.TODO())
}
return append(getRefDesciptions(p, limit-1), descr)
return append(getRefMetadata(p, limit-1), meta)
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {


@@ -24,8 +24,8 @@ import (
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver-next"
"github.com/moby/buildkit/solver-next/llbsolver/ops"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
@@ -39,8 +39,6 @@ import (
netcontext "golang.org/x/net/context"
)
// TODO: this file should be removed. containerd defines ContainerdWorker, oci defines OCIWorker. There is no base worker.
// WorkerOpt is specific to a worker.
// See also CommonOpt.
type WorkerOpt struct {
@@ -56,7 +54,6 @@ type WorkerOpt struct {
Exporters map[string]exporter.Exporter
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
// ImageStore images.Store // optional
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
@@ -64,8 +61,6 @@ type WorkerOpt struct {
type Worker struct {
WorkerOpt
SourceManager *source.Manager
// Exporters map[string]exporter.Exporter
// ImageSource source.Source
}
// NewWorker instantiates a local worker
@@ -177,36 +172,7 @@ func (w *Worker) Exporter(name string) (exporter.Exporter, error) {
return exp, nil
}
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef) (*solver.Remote, error) {
// diffPairs, err := blobs.GetDiffPairs(ctx, w.ContentStore, w.Snapshotter, w.Differ, ref)
// if err != nil {
// return nil, errors.Wrap(err, "failed calculaing diff pairs for exported snapshot")
// }
// if len(diffPairs) == 0 {
// return nil, nil
// }
//
// descs := make([]ocispec.Descriptor, len(diffPairs))
//
// for i, dp := range diffPairs {
// info, err := w.ContentStore.Info(ctx, dp.Blobsum)
// if err != nil {
// return nil, err
// }
// descs[i] = ocispec.Descriptor{
// Digest: dp.Blobsum,
// Size: info.Size,
// MediaType: schema2.MediaTypeLayer,
// Annotations: map[string]string{
// "containerd.io/uncompressed": dp.DiffID.String(),
// },
// }
// }
//
// return &solver.Remote{
// Descriptors: descs,
// Provider: w.ContentStore,
// }, nil
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
return nil, errors.Errorf("getremote not implemented")
}
@@ -226,7 +192,6 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
provider: remote.Provider,
w: w,
pctx: ctx,
// ref: l.Blob.Digest.String(),
})
}
@@ -243,73 +208,13 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
}
defer release()
ref, err := w.CacheManager.Get(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
if err != nil {
return nil, err
}
// eg, gctx := errgroup.WithContext(ctx)
// for _, desc := range remote.Descriptors {
// func(desc ocispec.Descriptor) {
// eg.Go(func() error {
// done := oneOffProgress(ctx, fmt.Sprintf("pulling %s", desc.Digest))
// return done(contentutil.Copy(gctx, w.ContentStore, remote.Provider, desc))
// })
// }(desc)
// }
//
// if err := eg.Wait(); err != nil {
// return nil, err
// }
//
// csh, release := snapshot.NewCompatibilitySnapshotter(w.Snapshotter)
// defer release()
//
// unpackProgressDone := oneOffProgress(ctx, "unpacking")
// chainID, err := w.unpack(ctx, remote.Descriptors, csh)
// if err != nil {
// return nil, unpackProgressDone(err)
// }
// unpackProgressDone(nil)
//
// return w.CacheManager.Get(ctx, chainID, cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
// return nil, errors.Errorf("fromremote not implemented")
return ref, nil
}
// utility function. could be moved to the constructor logic?
// func Labels(executor, snapshotter string) map[string]string {
// hostname, err := os.Hostname()
// if err != nil {
// hostname = "unknown"
// }
// labels := map[string]string{
// worker.LabelOS: runtime.GOOS,
// worker.LabelArch: runtime.GOOSARCH,
// worker.LabelExecutor: executor,
// worker.LabelSnapshotter: snapshotter,
// worker.LabelHostname: hostname,
// }
// return labels
// }
//
// // ID reads the worker id from the `workerid` file.
// // If not exist, it creates a random one,
// func ID(root string) (string, error) {
// f := filepath.Join(root, "workerid")
// b, err := ioutil.ReadFile(f)
// if err != nil {
// if os.IsNotExist(err) {
// id := identity.NewID()
// err := ioutil.WriteFile(f, []byte(id), 0400)
// return id, err
// } else {
// return "", err
// }
// }
// return string(b), nil
// }
type discardProgress struct{}
func (_ *discardProgress) WriteProgress(_ pkgprogress.Progress) error {