vendor: buildkit to 98f1604134f945d48538ffca0e18662337b4a850

Signed-off-by: Tibor Vass <tibor@docker.com>
Tibor Vass committed 7 years ago · commit 0ab7c1c5ba
55 changed files with 3831 additions and 1458 deletions
  1. builder/builder-next/controller.go (+13 -16)
  2. builder/builder-next/exporter/export.go (+1 -1)
  3. vendor.conf (+1 -1)
  4. vendor/github.com/moby/buildkit/README.md (+1 -1)
  5. vendor/github.com/moby/buildkit/api/services/control/control.pb.go (+79 -419)
  6. vendor/github.com/moby/buildkit/api/services/control/control.proto (+7 -8)
  7. vendor/github.com/moby/buildkit/api/types/generate.go (+3 -0)
  8. vendor/github.com/moby/buildkit/api/types/worker.pb.go (+523 -0)
  9. vendor/github.com/moby/buildkit/api/types/worker.proto (+16 -0)
  10. vendor/github.com/moby/buildkit/cache/manager.go (+1 -1)
  11. vendor/github.com/moby/buildkit/cache/refs.go (+12 -5)
  12. vendor/github.com/moby/buildkit/cache/remotecache/export.go (+41 -56)
  13. vendor/github.com/moby/buildkit/cache/remotecache/import.go (+38 -64)
  14. vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go (+73 -0)
  15. vendor/github.com/moby/buildkit/client/llb/source.go (+9 -0)
  16. vendor/github.com/moby/buildkit/client/llb/state.go (+10 -1)
  17. vendor/github.com/moby/buildkit/client/workers.go (+1 -15)
  18. vendor/github.com/moby/buildkit/control/control.go (+20 -28)
  19. vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go (+7 -7)
  20. vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go (+53 -35)
  21. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go (+0 -41)
  22. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go (+24 -29)
  23. vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go (+0 -86)
  24. vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go (+15 -2)
  25. vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go (+6 -10)
  26. vendor/github.com/moby/buildkit/frontend/frontend.go (+8 -9)
  27. vendor/github.com/moby/buildkit/frontend/gateway/client/client.go (+15 -4)
  28. vendor/github.com/moby/buildkit/frontend/gateway/client/result.go (+54 -0)
  29. vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go (+149 -0)
  30. vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go (+38 -0)
  31. vendor/github.com/moby/buildkit/frontend/gateway/gateway.go (+199 -52)
  32. vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go (+64 -0)
  33. vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go (+1248 -121)
  34. vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto (+40 -2)
  35. vendor/github.com/moby/buildkit/frontend/result.go (+23 -0)
  36. vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go (+39 -22)
  37. vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go (+6 -2)
  38. vendor/github.com/moby/buildkit/solver/llbsolver/solver.go (+29 -26)
  39. vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go (+3 -2)
  40. vendor/github.com/moby/buildkit/solver/pb/attr.go (+1 -0)
  41. vendor/github.com/moby/buildkit/solver/pb/ops.pb.go (+1 -0)
  42. vendor/github.com/moby/buildkit/solver/pb/ops.proto (+1 -0)
  43. vendor/github.com/moby/buildkit/solver/pb/platform.go (+41 -0)
  44. vendor/github.com/moby/buildkit/util/apicaps/caps.go (+161 -0)
  45. vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go (+535 -0)
  46. vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto (+19 -0)
  47. vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go (+3 -0)
  48. vendor/github.com/moby/buildkit/util/contentutil/fetcher.go (+5 -12)
  49. vendor/github.com/moby/buildkit/util/contentutil/pusher.go (+58 -0)
  50. vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md (+0 -1)
  51. vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go (+0 -190)
  52. vendor/github.com/moby/buildkit/util/push/push.go (+0 -184)
  53. vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go (+113 -0)
  54. vendor/github.com/moby/buildkit/vendor.conf (+7 -5)
  55. vendor/github.com/moby/buildkit/worker/workercontroller.go (+17 -0)

+ 13 - 16
builder/builder-next/controller.go

@@ -13,12 +13,13 @@ import (
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache/metadata"
-	"github.com/moby/buildkit/cache/remotecache"
+	registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/frontend"
-	"github.com/moby/buildkit/frontend/dockerfile"
+	dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
 	"github.com/moby/buildkit/frontend/gateway"
+	"github.com/moby/buildkit/frontend/gateway/forwarder"
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/solver/boltdbcachestorage"
 	"github.com/moby/buildkit/worker"
@@ -113,10 +114,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, err
 	}
 
-	frontends := map[string]frontend.Frontend{}
-	frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
-	frontends["gateway.v0"] = gateway.NewGatewayFrontend()
-
 	wopt := mobyworker.Opt{
 		ID:                "moby",
 		SessionManager:    opt.SessionManager,
@@ -141,17 +138,17 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 	}
 	wc.Add(w)
 
-	ci := remotecache.NewCacheImporter(remotecache.ImportOpt{
-		Worker:         w,
-		SessionManager: opt.SessionManager,
-	})
+	frontends := map[string]frontend.Frontend{
+		"dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
+		"gateway.v0":    gateway.NewGatewayFrontend(wc),
+	}
 
 	return control.NewController(control.Opt{
-		SessionManager:   opt.SessionManager,
-		WorkerController: wc,
-		Frontends:        frontends,
-		CacheKeyStorage:  cacheStorage,
-		// CacheExporter:    ce,
-		CacheImporter: ci,
+		SessionManager:           opt.SessionManager,
+		WorkerController:         wc,
+		Frontends:                frontends,
+		CacheKeyStorage:          cacheStorage,
+		ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager),
+		// TODO: set ResolveCacheExporterFunc for exporting cache
 	})
 }
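
For context on the new cache-import wiring above: control.Opt no longer takes a pre-built CacheImporter, it takes a resolver callback, and the registry resolver is only one possible implementation. A minimal sketch of an alternative resolver, following the ResolveCacheImporterFunc and NewImporter signatures from cache/remotecache/import.go further down; localStore and resolveDesc are hypothetical stand-ins for illustration, not part of this commit:

package registryexample

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/cache/remotecache"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// newLocalCacheResolver sketches an alternative to the registry resolver:
// it serves remote-cache manifests out of a pre-populated local content store.
// localStore and resolveDesc are hypothetical inputs used for illustration.
func newLocalCacheResolver(localStore content.Provider, resolveDesc func(ref string) (ocispec.Descriptor, error)) remotecache.ResolveCacheImporterFunc {
	return func(ctx context.Context, typ, ref string) (remotecache.Importer, ocispec.Descriptor, error) {
		// typ is expected to be empty for now (see the comment on
		// ResolveCacheImporterFunc in cache/remotecache/import.go below).
		desc, err := resolveDesc(ref)
		if err != nil {
			return nil, ocispec.Descriptor{}, err
		}
		// NewImporter accepts any content.Provider, so a local store works
		// the same way as the registry-backed provider.
		return remotecache.NewImporter(localStore), desc, nil
	}
}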

+ 1 - 1
builder/builder-next/exporter/export.go

@@ -83,7 +83,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
 	if ref != nil {
 		layersDone := oneOffProgress(ctx, "exporting layers")
 
-		if err := ref.Finalize(ctx); err != nil {
+		if err := ref.Finalize(ctx, true); err != nil {
 			return nil, err
 		}
 

+ 1 - 1
vendor.conf

@@ -26,7 +26,7 @@ github.com/imdario/mergo v0.3.5
 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
 
 # buildkit
-github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb
+github.com/moby/buildkit 98f1604134f945d48538ffca0e18662337b4a850
 github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7

+ 1 - 1
vendor/github.com/moby/buildkit/README.md

@@ -256,7 +256,7 @@ make test TESTPKGS=./client
 make test TESTPKGS=./client TESTFLAGS="--run /TestCallDiskUsage -v" 
 
 # run all integration tests with a specific worker
-# supported workers are oci and containerd
+# supported workers: oci, oci-rootless, containerd, containerd-1.0
 make test TESTPKGS=./client TESTFLAGS="--run //worker=containerd -v" 
 ```
 

+ 79 - 419
vendor/github.com/moby/buildkit/api/services/control/control.pb.go

@@ -23,7 +23,6 @@
 		BytesMessage
 		ListWorkersRequest
 		ListWorkersResponse
-		WorkerRecord
 */
 package moby_buildkit_v1
 
@@ -33,6 +32,7 @@ import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/golang/protobuf/ptypes/timestamp"
 import pb "github.com/moby/buildkit/solver/pb"
+import moby_buildkit_v1_types "github.com/moby/buildkit/api/types"
 
 import time "time"
 import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
@@ -526,7 +526,7 @@ func (m *ListWorkersRequest) GetFilter() []string {
 }
 
 type ListWorkersResponse struct {
-	Record []*WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"`
+	Record []*moby_buildkit_v1_types.WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"`
 }
 
 func (m *ListWorkersResponse) Reset()                    { *m = ListWorkersResponse{} }
@@ -534,45 +534,13 @@ func (m *ListWorkersResponse) String() string            { return proto.CompactT
 func (*ListWorkersResponse) ProtoMessage()               {}
 func (*ListWorkersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} }
 
-func (m *ListWorkersResponse) GetRecord() []*WorkerRecord {
+func (m *ListWorkersResponse) GetRecord() []*moby_buildkit_v1_types.WorkerRecord {
 	if m != nil {
 		return m.Record
 	}
 	return nil
 }
 
-type WorkerRecord struct {
-	ID        string            `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	Labels    map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	Platforms []pb.Platform     `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
-}
-
-func (m *WorkerRecord) Reset()                    { *m = WorkerRecord{} }
-func (m *WorkerRecord) String() string            { return proto.CompactTextString(m) }
-func (*WorkerRecord) ProtoMessage()               {}
-func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} }
-
-func (m *WorkerRecord) GetID() string {
-	if m != nil {
-		return m.ID
-	}
-	return ""
-}
-
-func (m *WorkerRecord) GetLabels() map[string]string {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
-func (m *WorkerRecord) GetPlatforms() []pb.Platform {
-	if m != nil {
-		return m.Platforms
-	}
-	return nil
-}
-
 func init() {
 	proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
 	proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
@@ -589,7 +557,6 @@ func init() {
 	proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
 	proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest")
 	proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse")
-	proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.WorkerRecord")
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -1620,59 +1587,6 @@ func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.ID) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
-		i += copy(dAtA[i:], m.ID)
-	}
-	if len(m.Labels) > 0 {
-		for k, _ := range m.Labels {
-			dAtA[i] = 0x12
-			i++
-			v := m.Labels[k]
-			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			i = encodeVarintControl(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	if len(m.Platforms) > 0 {
-		for _, msg := range m.Platforms {
-			dAtA[i] = 0x1a
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	return i, nil
-}
-
 func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1984,30 +1898,6 @@ func (m *ListWorkersResponse) Size() (n int) {
 	return n
 }
 
-func (m *WorkerRecord) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.ID)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if len(m.Labels) > 0 {
-		for k, v := range m.Labels {
-			_ = k
-			_ = v
-			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
-		}
-	}
-	if len(m.Platforms) > 0 {
-		for _, e := range m.Platforms {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
 func sovControl(x uint64) (n int) {
 	for {
 		n++
@@ -4487,7 +4377,7 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Record = append(m.Record, &WorkerRecord{})
+			m.Record = append(m.Record, &moby_buildkit_v1_types.WorkerRecord{})
 			if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -4513,234 +4403,6 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowControl
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ID = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Labels == nil {
-				m.Labels = make(map[string]string)
-			}
-			var mapkey string
-			var mapvalue string
-			for iNdEx < postIndex {
-				entryPreIndex := iNdEx
-				var wire uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowControl
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				fieldNum := int32(wire >> 3)
-				if fieldNum == 1 {
-					var stringLenmapkey uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowControl
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					intStringLenmapkey := int(stringLenmapkey)
-					if intStringLenmapkey < 0 {
-						return ErrInvalidLengthControl
-					}
-					postStringIndexmapkey := iNdEx + intStringLenmapkey
-					if postStringIndexmapkey > l {
-						return io.ErrUnexpectedEOF
-					}
-					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
-					iNdEx = postStringIndexmapkey
-				} else if fieldNum == 2 {
-					var stringLenmapvalue uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowControl
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					intStringLenmapvalue := int(stringLenmapvalue)
-					if intStringLenmapvalue < 0 {
-						return ErrInvalidLengthControl
-					}
-					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-					if postStringIndexmapvalue > l {
-						return io.ErrUnexpectedEOF
-					}
-					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
-					iNdEx = postStringIndexmapvalue
-				} else {
-					iNdEx = entryPreIndex
-					skippy, err := skipControl(dAtA[iNdEx:])
-					if err != nil {
-						return err
-					}
-					if skippy < 0 {
-						return ErrInvalidLengthControl
-					}
-					if (iNdEx + skippy) > postIndex {
-						return io.ErrUnexpectedEOF
-					}
-					iNdEx += skippy
-				}
-			}
-			m.Labels[mapkey] = mapvalue
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Platforms = append(m.Platforms, pb.Platform{})
-			if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipControl(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthControl
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
 func skipControl(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
@@ -4849,81 +4511,79 @@ var (
 func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
 
 var fileDescriptorControl = []byte{
-	// 1214 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x6f, 0x1b, 0x55,
-	0x10, 0x67, 0x6d, 0xc7, 0xf6, 0x8e, 0x9d, 0x28, 0x3c, 0xa0, 0x5a, 0x2d, 0x90, 0x98, 0x05, 0x24,
-	0xab, 0x6a, 0xd7, 0x69, 0xa0, 0x08, 0x72, 0xa8, 0x5a, 0xc7, 0x45, 0x24, 0x4a, 0x44, 0xd8, 0x34,
-	0x54, 0xe2, 0xb6, 0xb6, 0x5f, 0xdc, 0x55, 0xd6, 0xfb, 0x96, 0xf7, 0x9e, 0xa3, 0x86, 0x4f, 0xc1,
-	0x81, 0x6f, 0xc2, 0x81, 0x33, 0x07, 0xa4, 0xde, 0xe0, 0xcc, 0x21, 0x45, 0xb9, 0xc3, 0x67, 0x40,
-	0xef, 0xcf, 0xda, 0xcf, 0x5e, 0xe7, 0x8f, 0xd3, 0x93, 0xdf, 0xcc, 0xfe, 0xe6, 0xb7, 0xf3, 0x66,
-	0x66, 0x67, 0xc6, 0xb0, 0xdc, 0x23, 0x09, 0xa7, 0x24, 0xf6, 0x53, 0x4a, 0x38, 0x41, 0xab, 0x43,
-	0xd2, 0x3d, 0xf3, 0xbb, 0xa3, 0x28, 0xee, 0x9f, 0x44, 0xdc, 0x3f, 0x7d, 0xe0, 0xde, 0x1f, 0x44,
-	0xfc, 0xc5, 0xa8, 0xeb, 0xf7, 0xc8, 0xb0, 0x35, 0x20, 0x03, 0xd2, 0x92, 0xc0, 0xee, 0xe8, 0x58,
-	0x4a, 0x52, 0x90, 0x27, 0x45, 0xe0, 0xae, 0x0f, 0x08, 0x19, 0xc4, 0x78, 0x82, 0xe2, 0xd1, 0x10,
-	0x33, 0x1e, 0x0e, 0x53, 0x0d, 0xb8, 0x67, 0xf0, 0x89, 0x97, 0xb5, 0xb2, 0x97, 0xb5, 0x18, 0x89,
-	0x4f, 0x31, 0x6d, 0xa5, 0xdd, 0x16, 0x49, 0x99, 0x42, 0x7b, 0x2b, 0x50, 0x3f, 0xa0, 0xa3, 0x04,
-	0x07, 0xf8, 0xc7, 0x11, 0x66, 0xdc, 0xbb, 0x0b, 0xab, 0x9d, 0x88, 0x9d, 0x1c, 0xb1, 0x70, 0x90,
-	0xe9, 0xd0, 0x1d, 0x28, 0x1f, 0x47, 0x31, 0xc7, 0xd4, 0xb1, 0x1a, 0x56, 0xd3, 0x0e, 0xb4, 0xe4,
-	0xed, 0xc2, 0xdb, 0x06, 0x96, 0xa5, 0x24, 0x61, 0x18, 0x3d, 0x84, 0x32, 0xc5, 0x3d, 0x42, 0xfb,
-	0x8e, 0xd5, 0x28, 0x36, 0x6b, 0x9b, 0x1f, 0xfa, 0xb3, 0x37, 0xf6, 0xb5, 0x81, 0x00, 0x05, 0x1a,
-	0xec, 0xfd, 0x5e, 0x80, 0x9a, 0xa1, 0x47, 0x2b, 0x50, 0xd8, 0xe9, 0xe8, 0xf7, 0x15, 0x76, 0x3a,
-	0xc8, 0x81, 0xca, 0xfe, 0x88, 0x87, 0xdd, 0x18, 0x3b, 0x85, 0x86, 0xd5, 0xac, 0x06, 0x99, 0x88,
-	0xde, 0x85, 0xa5, 0x9d, 0xe4, 0x88, 0x61, 0xa7, 0x28, 0xf5, 0x4a, 0x40, 0x08, 0x4a, 0x87, 0xd1,
-	0x4f, 0xd8, 0x29, 0x35, 0xac, 0x66, 0x31, 0x90, 0x67, 0x71, 0x8f, 0x83, 0x90, 0xe2, 0x84, 0x3b,
-	0x4b, 0xea, 0x1e, 0x4a, 0x42, 0x6d, 0xb0, 0xb7, 0x29, 0x0e, 0x39, 0xee, 0x3f, 0xe1, 0x4e, 0xb9,
-	0x61, 0x35, 0x6b, 0x9b, 0xae, 0xaf, 0xc2, 0xec, 0x67, 0x61, 0xf6, 0x9f, 0x65, 0x61, 0x6e, 0x57,
-	0x5f, 0x9d, 0xaf, 0xbf, 0xf5, 0xf3, 0xeb, 0x75, 0x2b, 0x98, 0x98, 0xa1, 0xc7, 0x00, 0x7b, 0x21,
-	0xe3, 0x47, 0x4c, 0x92, 0x54, 0xae, 0x25, 0x29, 0x49, 0x02, 0xc3, 0x06, 0xad, 0x01, 0xc8, 0x00,
-	0x6c, 0x93, 0x51, 0xc2, 0x9d, 0xaa, 0xf4, 0xdb, 0xd0, 0xa0, 0x06, 0xd4, 0x3a, 0x98, 0xf5, 0x68,
-	0x94, 0xf2, 0x88, 0x24, 0x8e, 0x2d, 0xaf, 0x60, 0xaa, 0xbc, 0x5f, 0x4a, 0x50, 0x3f, 0x14, 0x39,
-	0xce, 0x12, 0xb7, 0x0a, 0xc5, 0x00, 0x1f, 0xeb, 0x28, 0x8a, 0x23, 0xf2, 0x01, 0x3a, 0xf8, 0x38,
-	0x4a, 0x22, 0xc9, 0x51, 0x90, 0x6e, 0xae, 0xf8, 0x69, 0xd7, 0x9f, 0x68, 0x03, 0x03, 0x81, 0x5c,
-	0xa8, 0x3e, 0x7d, 0x99, 0x12, 0x2a, 0x92, 0x5f, 0x94, 0x34, 0x63, 0x19, 0x3d, 0x87, 0xe5, 0xec,
-	0xfc, 0x84, 0x73, 0xca, 0x9c, 0x92, 0x4c, 0xf8, 0x83, 0x7c, 0xc2, 0x4d, 0xa7, 0xfc, 0x29, 0x9b,
-	0xa7, 0x09, 0xa7, 0x67, 0xc1, 0x34, 0x8f, 0xc8, 0xf5, 0x21, 0x66, 0x4c, 0x78, 0xa8, 0x12, 0x95,
-	0x89, 0xc2, 0x9d, 0xaf, 0x29, 0x49, 0x38, 0x4e, 0xfa, 0x32, 0x51, 0x76, 0x30, 0x96, 0x85, 0x3b,
-	0xd9, 0x59, 0xb9, 0x53, 0xb9, 0x91, 0x3b, 0x53, 0x36, 0xda, 0x9d, 0x29, 0x1d, 0xda, 0x82, 0xa5,
-	0xed, 0xb0, 0xf7, 0x02, 0xcb, 0x9c, 0xd4, 0x36, 0xd7, 0xf2, 0x84, 0xf2, 0xf1, 0xb7, 0x32, 0x09,
-	0xac, 0x5d, 0x12, 0xe5, 0x11, 0x28, 0x13, 0xf7, 0x31, 0xa0, 0xfc, 0x7d, 0x45, 0x5e, 0x4e, 0xf0,
-	0x59, 0x96, 0x97, 0x13, 0x7c, 0x26, 0x8a, 0xf8, 0x34, 0x8c, 0x47, 0xaa, 0xb8, 0xed, 0x40, 0x09,
-	0x5b, 0x85, 0x2f, 0x2d, 0xc1, 0x90, 0x77, 0x71, 0x11, 0x06, 0xef, 0xb5, 0x05, 0x75, 0xd3, 0x43,
-	0xf4, 0x01, 0xd8, 0xca, 0xa9, 0x49, 0x71, 0x4c, 0x14, 0xa2, 0x0e, 0x77, 0x86, 0x5a, 0x60, 0x4e,
-	0xa1, 0x51, 0x6c, 0xda, 0x81, 0xa1, 0x41, 0xdf, 0x41, 0x4d, 0x81, 0x55, 0x94, 0x8b, 0x32, 0xca,
-	0xad, 0xab, 0x83, 0xe2, 0x1b, 0x16, 0x2a, 0xc6, 0x26, 0x87, 0xfb, 0x08, 0x56, 0x67, 0x01, 0x0b,
-	0xdd, 0xf0, 0x37, 0x0b, 0x96, 0x75, 0x52, 0x75, 0x17, 0x0a, 0x33, 0x46, 0x4c, 0x33, 0x9d, 0xee,
-	0x47, 0x0f, 0x2f, 0xad, 0x07, 0x05, 0xf3, 0x67, 0xed, 0x94, 0xbf, 0x39, 0x3a, 0x77, 0x1b, 0xde,
-	0x9b, 0x0b, 0x5d, 0xc8, 0xf3, 0x8f, 0x60, 0xf9, 0x90, 0x87, 0x7c, 0xc4, 0x2e, 0xfd, 0x64, 0xbd,
-	0x5f, 0x2d, 0x58, 0xc9, 0x30, 0xfa, 0x76, 0x9f, 0x43, 0xf5, 0x14, 0x53, 0x8e, 0x5f, 0x62, 0xa6,
-	0x6f, 0xe5, 0xe4, 0x6f, 0xf5, 0xbd, 0x44, 0x04, 0x63, 0x24, 0xda, 0x82, 0x2a, 0x93, 0x3c, 0x58,
-	0xa5, 0x75, 0x6e, 0x29, 0x2b, 0x2b, 0xfd, 0xbe, 0x31, 0x1e, 0xb5, 0xa0, 0x14, 0x93, 0x41, 0x96,
-	0xed, 0xf7, 0x2f, 0xb3, 0xdb, 0x23, 0x83, 0x40, 0x02, 0xbd, 0xf3, 0x02, 0x94, 0x95, 0x0e, 0xed,
-	0x42, 0xb9, 0x1f, 0x0d, 0x30, 0xe3, 0xea, 0x56, 0xed, 0x4d, 0xf1, 0x81, 0xfc, 0x7d, 0xbe, 0x7e,
-	0xd7, 0x18, 0x54, 0x24, 0xc5, 0x89, 0x18, 0x94, 0x61, 0x94, 0x60, 0xca, 0x5a, 0x03, 0x72, 0x5f,
-	0x99, 0xf8, 0x1d, 0xf9, 0x13, 0x68, 0x06, 0xc1, 0x15, 0x25, 0xe9, 0x88, 0xeb, 0xc2, 0xbc, 0x1d,
-	0x97, 0x62, 0x10, 0x23, 0x22, 0x09, 0x87, 0x58, 0xf7, 0x35, 0x79, 0x16, 0x23, 0xa2, 0x27, 0xea,
-	0xb6, 0x2f, 0x07, 0x47, 0x35, 0xd0, 0x12, 0xda, 0x82, 0x0a, 0xe3, 0x21, 0xe5, 0xb8, 0x2f, 0x5b,
-	0xd2, 0x4d, 0x7a, 0x7b, 0x66, 0x80, 0x1e, 0x81, 0xdd, 0x23, 0xc3, 0x34, 0xc6, 0xc2, 0xba, 0x7c,
-	0x43, 0xeb, 0x89, 0x89, 0xa8, 0x1e, 0x4c, 0x29, 0xa1, 0x72, 0xaa, 0xd8, 0x81, 0x12, 0xbc, 0xff,
-	0x0a, 0x50, 0x37, 0x93, 0x95, 0x9b, 0x98, 0xbb, 0x50, 0x56, 0xa9, 0x57, 0x55, 0x77, 0xbb, 0x50,
-	0x29, 0x86, 0xb9, 0xa1, 0x72, 0xa0, 0xd2, 0x1b, 0x51, 0x39, 0x4e, 0xd5, 0x90, 0xcd, 0x44, 0xe1,
-	0x30, 0x27, 0x3c, 0x8c, 0x65, 0xa8, 0x8a, 0x81, 0x12, 0xc4, 0x94, 0x1d, 0xaf, 0x2a, 0x8b, 0x4d,
-	0xd9, 0xb1, 0x99, 0x99, 0x86, 0xca, 0x1b, 0xa5, 0xa1, 0xba, 0x70, 0x1a, 0xbc, 0x3f, 0x2c, 0xb0,
-	0xc7, 0x55, 0x6e, 0x44, 0xd7, 0x7a, 0xe3, 0xe8, 0x4e, 0x45, 0xa6, 0x70, 0xbb, 0xc8, 0xdc, 0x81,
-	0x32, 0xe3, 0x14, 0x87, 0x43, 0x99, 0xa3, 0x62, 0xa0, 0x25, 0xd1, 0x4f, 0x86, 0x6c, 0x20, 0x33,
-	0x54, 0x0f, 0xc4, 0xd1, 0xf3, 0xa0, 0xde, 0x3e, 0xe3, 0x98, 0xed, 0x63, 0x26, 0x96, 0x0b, 0x91,
-	0xdb, 0x7e, 0xc8, 0x43, 0x79, 0x8f, 0x7a, 0x20, 0xcf, 0xde, 0x3d, 0x40, 0x7b, 0x11, 0xe3, 0xcf,
-	0x09, 0x3d, 0xc1, 0x94, 0xcd, 0xdb, 0x03, 0x8b, 0xc6, 0x1e, 0xb8, 0x0f, 0xef, 0x4c, 0xa1, 0x75,
-	0x97, 0xfa, 0x62, 0x66, 0x13, 0x9c, 0xd3, 0x6d, 0x94, 0xc9, 0xcc, 0x2a, 0xf8, 0xa7, 0x05, 0x75,
-	0xf3, 0x41, 0xae, 0xb2, 0xdb, 0x50, 0xde, 0x0b, 0xbb, 0x38, 0xce, 0xda, 0xd8, 0xdd, 0xab, 0x89,
-	0x7d, 0x05, 0x56, 0x7d, 0x5c, 0x5b, 0xa2, 0x0d, 0xb0, 0xd3, 0x38, 0xe4, 0xc7, 0x84, 0x0e, 0xb3,
-	0xae, 0x56, 0x17, 0x7b, 0xd0, 0x81, 0x56, 0xea, 0x31, 0x3e, 0x01, 0xb9, 0x5f, 0x41, 0xcd, 0x20,
-	0x5a, 0xa4, 0xcb, 0x6f, 0xfe, 0x5b, 0x84, 0xca, 0xb6, 0xfa, 0x1b, 0x80, 0x9e, 0x81, 0x3d, 0x5e,
-	0x9a, 0x91, 0x97, 0xf7, 0x7c, 0x76, 0xfb, 0x76, 0x3f, 0xbe, 0x12, 0xa3, 0x63, 0xfd, 0x0d, 0x2c,
-	0xc9, 0x35, 0x1e, 0xcd, 0x09, 0xb2, 0xb9, 0xdf, 0xbb, 0x57, 0xaf, 0xe3, 0x1b, 0x96, 0x60, 0x92,
-	0xf3, 0x70, 0x1e, 0x93, 0xb9, 0x38, 0xb9, 0xeb, 0xd7, 0x0c, 0x52, 0xb4, 0x0f, 0x65, 0xdd, 0x9a,
-	0xe6, 0x41, 0xcd, 0xa9, 0xe7, 0x36, 0x2e, 0x07, 0x28, 0xb2, 0x0d, 0x0b, 0xed, 0x8f, 0xb7, 0xc2,
-	0x79, 0xae, 0x99, 0x25, 0xed, 0x5e, 0xf3, 0xbc, 0x69, 0x6d, 0x58, 0xe8, 0x07, 0xa8, 0x19, 0x45,
-	0x8b, 0x3e, 0xc9, 0x9b, 0xe4, 0xbf, 0x00, 0xf7, 0xd3, 0x6b, 0x50, 0xca, 0xd9, 0x76, 0xfd, 0xd5,
-	0xc5, 0x9a, 0xf5, 0xd7, 0xc5, 0x9a, 0xf5, 0xcf, 0xc5, 0x9a, 0xd5, 0x2d, 0xcb, 0x6f, 0xf8, 0xb3,
-	0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x86, 0xd4, 0x0f, 0xa1, 0x0a, 0x0e, 0x00, 0x00,
+	// 1176 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x23, 0x45,
+	0x13, 0x7e, 0xc7, 0x76, 0xfc, 0x51, 0x76, 0xa2, 0xbc, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00,
+	0xc9, 0x5a, 0xed, 0xce, 0x64, 0x03, 0x2b, 0xa1, 0x08, 0xad, 0x76, 0x1d, 0x2f, 0x22, 0x51, 0x22,
+	0x96, 0xce, 0x86, 0x95, 0xb8, 0x8d, 0xed, 0x8e, 0x77, 0x14, 0x7b, 0x7a, 0xe8, 0xee, 0x09, 0x6b,
+	0x7e, 0x05, 0x07, 0xfe, 0x09, 0x07, 0xce, 0x1c, 0x90, 0xf6, 0xc8, 0x99, 0x43, 0x16, 0xe5, 0x0e,
+	0xbf, 0x01, 0xf5, 0xc7, 0xd8, 0xed, 0xd8, 0xf9, 0xdc, 0x53, 0xba, 0x2a, 0x4f, 0x3d, 0x53, 0x5d,
+	0x4f, 0xb9, 0xab, 0x60, 0xb9, 0x47, 0x13, 0xc1, 0xe8, 0x30, 0x48, 0x19, 0x15, 0x14, 0xad, 0x8e,
+	0x68, 0x77, 0x1c, 0x74, 0xb3, 0x78, 0xd8, 0x3f, 0x8e, 0x45, 0x70, 0xf2, 0xc0, 0xbb, 0x3f, 0x88,
+	0xc5, 0xcb, 0xac, 0x1b, 0xf4, 0xe8, 0x28, 0x1c, 0xd0, 0x01, 0x0d, 0x15, 0xb0, 0x9b, 0x1d, 0x29,
+	0x4b, 0x19, 0xea, 0xa4, 0x09, 0xbc, 0xf5, 0x01, 0xa5, 0x83, 0x21, 0x99, 0xa2, 0x44, 0x3c, 0x22,
+	0x5c, 0x44, 0xa3, 0xd4, 0x00, 0xee, 0x59, 0x7c, 0xf2, 0x63, 0x61, 0xfe, 0xb1, 0x90, 0xd3, 0xe1,
+	0x09, 0x61, 0x61, 0xda, 0x0d, 0x69, 0xca, 0x0d, 0x3a, 0xbc, 0x10, 0x1d, 0xa5, 0x71, 0x28, 0xc6,
+	0x29, 0xe1, 0xe1, 0x8f, 0x94, 0x1d, 0x13, 0xa6, 0x03, 0xfc, 0x15, 0x68, 0x3c, 0x63, 0x59, 0x42,
+	0x30, 0xf9, 0x21, 0x23, 0x5c, 0xf8, 0x77, 0x61, 0xb5, 0x13, 0xf3, 0xe3, 0x43, 0x1e, 0x0d, 0x72,
+	0x1f, 0xba, 0x03, 0xe5, 0xa3, 0x78, 0x28, 0x08, 0x73, 0x9d, 0xa6, 0xd3, 0xaa, 0x61, 0x63, 0xf9,
+	0xbb, 0xf0, 0x7f, 0x0b, 0xcb, 0x53, 0x9a, 0x70, 0x82, 0x1e, 0x42, 0x99, 0x91, 0x1e, 0x65, 0x7d,
+	0xd7, 0x69, 0x16, 0x5b, 0xf5, 0xcd, 0x0f, 0x83, 0xf3, 0x25, 0x0a, 0x4c, 0x80, 0x04, 0x61, 0x03,
+	0xf6, 0x7f, 0x2f, 0x40, 0xdd, 0xf2, 0xa3, 0x15, 0x28, 0xec, 0x74, 0xcc, 0xf7, 0x0a, 0x3b, 0x1d,
+	0xe4, 0x42, 0x65, 0x3f, 0x13, 0x51, 0x77, 0x48, 0xdc, 0x42, 0xd3, 0x69, 0x55, 0x71, 0x6e, 0xa2,
+	0x77, 0x61, 0x69, 0x27, 0x39, 0xe4, 0xc4, 0x2d, 0x2a, 0xbf, 0x36, 0x10, 0x82, 0xd2, 0x41, 0xfc,
+	0x13, 0x71, 0x4b, 0x4d, 0xa7, 0x55, 0xc4, 0xea, 0x2c, 0xef, 0xf1, 0x2c, 0x62, 0x24, 0x11, 0xee,
+	0x92, 0xbe, 0x87, 0xb6, 0x50, 0x1b, 0x6a, 0xdb, 0x8c, 0x44, 0x82, 0xf4, 0x9f, 0x08, 0xb7, 0xdc,
+	0x74, 0x5a, 0xf5, 0x4d, 0x2f, 0xd0, 0xba, 0x04, 0xb9, 0x2e, 0xc1, 0xf3, 0x5c, 0x97, 0x76, 0xf5,
+	0xf5, 0xe9, 0xfa, 0xff, 0x7e, 0x7e, 0xb3, 0xee, 0xe0, 0x69, 0x18, 0x7a, 0x0c, 0xb0, 0x17, 0x71,
+	0x71, 0xc8, 0x15, 0x49, 0xe5, 0x4a, 0x92, 0x92, 0x22, 0xb0, 0x62, 0xd0, 0x1a, 0x80, 0x2a, 0xc0,
+	0x36, 0xcd, 0x12, 0xe1, 0x56, 0x55, 0xde, 0x96, 0x07, 0x35, 0xa1, 0xde, 0x21, 0xbc, 0xc7, 0xe2,
+	0x54, 0xc4, 0x34, 0x71, 0x6b, 0xea, 0x0a, 0xb6, 0xcb, 0xff, 0xa5, 0x04, 0x8d, 0x03, 0xd9, 0x14,
+	0xb9, 0x70, 0xab, 0x50, 0xc4, 0xe4, 0xc8, 0x54, 0x51, 0x1e, 0x51, 0x00, 0xd0, 0x21, 0x47, 0x71,
+	0x12, 0x2b, 0x8e, 0x82, 0x4a, 0x73, 0x25, 0x48, 0xbb, 0xc1, 0xd4, 0x8b, 0x2d, 0x04, 0xf2, 0xa0,
+	0xfa, 0xf4, 0x55, 0x4a, 0x99, 0x14, 0xbf, 0xa8, 0x68, 0x26, 0x36, 0x7a, 0x01, 0xcb, 0xf9, 0xf9,
+	0x89, 0x10, 0x8c, 0xbb, 0x25, 0x25, 0xf8, 0x83, 0x79, 0xc1, 0xed, 0xa4, 0x82, 0x99, 0x98, 0xa7,
+	0x89, 0x60, 0x63, 0x3c, 0xcb, 0x23, 0xb5, 0x3e, 0x20, 0x9c, 0xcb, 0x0c, 0xb5, 0x50, 0xb9, 0x29,
+	0xd3, 0xf9, 0x8a, 0xd1, 0x44, 0x90, 0xa4, 0xaf, 0x84, 0xaa, 0xe1, 0x89, 0x2d, 0xd3, 0xc9, 0xcf,
+	0x3a, 0x9d, 0xca, 0xb5, 0xd2, 0x99, 0x89, 0x31, 0xe9, 0xcc, 0xf8, 0xd0, 0x16, 0x2c, 0x6d, 0x47,
+	0xbd, 0x97, 0x44, 0x69, 0x52, 0xdf, 0x5c, 0x9b, 0x27, 0x54, 0xff, 0xfe, 0x46, 0x89, 0xc0, 0xdb,
+	0x25, 0xd9, 0x1e, 0x58, 0x87, 0x78, 0x8f, 0x01, 0xcd, 0xdf, 0x57, 0xea, 0x72, 0x4c, 0xc6, 0xb9,
+	0x2e, 0xc7, 0x64, 0x2c, 0x9b, 0xf8, 0x24, 0x1a, 0x66, 0xba, 0xb9, 0x6b, 0x58, 0x1b, 0x5b, 0x85,
+	0x2f, 0x1c, 0xc9, 0x30, 0x9f, 0xe2, 0x4d, 0x18, 0xfc, 0x37, 0x0e, 0x34, 0xec, 0x0c, 0xd1, 0x07,
+	0x50, 0xd3, 0x49, 0x4d, 0x9b, 0x63, 0xea, 0x90, 0x7d, 0xb8, 0x33, 0x32, 0x06, 0x77, 0x0b, 0xcd,
+	0x62, 0xab, 0x86, 0x2d, 0x0f, 0xfa, 0x16, 0xea, 0x1a, 0xac, 0xab, 0x5c, 0x54, 0x55, 0x0e, 0x2f,
+	0x2f, 0x4a, 0x60, 0x45, 0xe8, 0x1a, 0xdb, 0x1c, 0xde, 0x23, 0x58, 0x3d, 0x0f, 0xb8, 0xd1, 0x0d,
+	0x7f, 0x73, 0x60, 0xd9, 0x88, 0x6a, 0x5e, 0xa1, 0x28, 0x67, 0x24, 0x2c, 0xf7, 0x99, 0xf7, 0xe8,
+	0xe1, 0x85, 0xfd, 0xa0, 0x61, 0xc1, 0xf9, 0x38, 0x9d, 0xef, 0x1c, 0x9d, 0xb7, 0x0d, 0xef, 0x2d,
+	0x84, 0xde, 0x28, 0xf3, 0x8f, 0x60, 0xf9, 0x40, 0x44, 0x22, 0xe3, 0x17, 0xfe, 0x64, 0xfd, 0x5f,
+	0x1d, 0x58, 0xc9, 0x31, 0xe6, 0x76, 0x9f, 0x43, 0xf5, 0x84, 0x30, 0x41, 0x5e, 0x11, 0x6e, 0x6e,
+	0xe5, 0xce, 0xdf, 0xea, 0x3b, 0x85, 0xc0, 0x13, 0x24, 0xda, 0x82, 0x2a, 0x57, 0x3c, 0x44, 0xcb,
+	0xba, 0xb0, 0x95, 0x75, 0x94, 0xf9, 0xde, 0x04, 0x8f, 0x42, 0x28, 0x0d, 0xe9, 0x20, 0x57, 0xfb,
+	0xfd, 0x8b, 0xe2, 0xf6, 0xe8, 0x00, 0x2b, 0xa0, 0x7f, 0x5a, 0x80, 0xb2, 0xf6, 0xa1, 0x5d, 0x28,
+	0xf7, 0xe3, 0x01, 0xe1, 0x42, 0xdf, 0xaa, 0xbd, 0x29, 0x7f, 0x20, 0x7f, 0x9d, 0xae, 0xdf, 0xb5,
+	0x66, 0x15, 0x4d, 0x49, 0x22, 0x27, 0x6b, 0x14, 0x27, 0x84, 0xf1, 0x70, 0x40, 0xef, 0xeb, 0x90,
+	0xa0, 0xa3, 0xfe, 0x60, 0xc3, 0x20, 0xb9, 0xe2, 0x24, 0xcd, 0x84, 0x69, 0xcc, 0xdb, 0x71, 0x69,
+	0x06, 0x39, 0x22, 0x92, 0x68, 0x44, 0xcc, 0xbb, 0xa6, 0xce, 0x72, 0x44, 0xf4, 0x64, 0xdf, 0xf6,
+	0xd5, 0xe0, 0xa8, 0x62, 0x63, 0xa1, 0x2d, 0xa8, 0x70, 0x11, 0x31, 0x41, 0xfa, 0xea, 0x49, 0xba,
+	0xce, 0xdb, 0x9e, 0x07, 0xa0, 0x47, 0x50, 0xeb, 0xd1, 0x51, 0x3a, 0x24, 0x32, 0xba, 0x7c, 0xcd,
+	0xe8, 0x69, 0x88, 0xec, 0x1e, 0xc2, 0x18, 0x65, 0x6a, 0xaa, 0xd4, 0xb0, 0x36, 0xfc, 0x7f, 0x0b,
+	0xd0, 0xb0, 0xc5, 0x9a, 0x9b, 0x98, 0xbb, 0x50, 0xd6, 0xd2, 0xeb, 0xae, 0xbb, 0x5d, 0xa9, 0x34,
+	0xc3, 0xc2, 0x52, 0xb9, 0x50, 0xe9, 0x65, 0x4c, 0x8d, 0x53, 0x3d, 0x64, 0x73, 0x53, 0x26, 0x2c,
+	0xa8, 0x88, 0x86, 0xaa, 0x54, 0x45, 0xac, 0x0d, 0x39, 0x65, 0x27, 0xbb, 0xcd, 0xcd, 0xa6, 0xec,
+	0x24, 0xcc, 0x96, 0xa1, 0xf2, 0x56, 0x32, 0x54, 0x6f, 0x2c, 0x83, 0xff, 0x87, 0x03, 0xb5, 0x49,
+	0x97, 0x5b, 0xd5, 0x75, 0xde, 0xba, 0xba, 0x33, 0x95, 0x29, 0xdc, 0xae, 0x32, 0x77, 0xa0, 0xcc,
+	0x05, 0x23, 0xd1, 0x48, 0x69, 0x54, 0xc4, 0xc6, 0x92, 0xef, 0xc9, 0x88, 0x0f, 0x94, 0x42, 0x0d,
+	0x2c, 0x8f, 0xbe, 0x0f, 0x8d, 0xf6, 0x58, 0x10, 0xbe, 0x4f, 0xb8, 0x5c, 0x2e, 0xa4, 0xb6, 0xfd,
+	0x48, 0x44, 0xea, 0x1e, 0x0d, 0xac, 0xce, 0xfe, 0x3d, 0x40, 0x7b, 0x31, 0x17, 0x2f, 0xd4, 0xa6,
+	0xc8, 0x17, 0xed, 0x81, 0x45, 0x6b, 0x0f, 0x3c, 0x80, 0x77, 0x66, 0xd0, 0xe6, 0x95, 0xfa, 0xf2,
+	0xdc, 0x26, 0xf8, 0xc9, 0xfc, 0xab, 0xa1, 0x16, 0xd2, 0x40, 0x07, 0xce, 0x2e, 0x84, 0x9b, 0xff,
+	0x14, 0xa1, 0xb2, 0xad, 0x77, 0x6d, 0xf4, 0x1c, 0x6a, 0x93, 0x45, 0x13, 0xf9, 0xf3, 0x34, 0xe7,
+	0x37, 0x56, 0xef, 0xe3, 0x4b, 0x31, 0x26, 0xbf, 0xaf, 0x61, 0x49, 0xad, 0xbe, 0x68, 0xc1, 0x33,
+	0x68, 0xef, 0xc4, 0xde, 0xe5, 0x2b, 0xec, 0x86, 0x23, 0x99, 0xd4, 0x0c, 0x59, 0xc4, 0x64, 0x2f,
+	0x1b, 0xde, 0xfa, 0x15, 0xc3, 0x07, 0xed, 0x43, 0xd9, 0xfc, 0x9c, 0x17, 0x41, 0xed, 0x49, 0xe1,
+	0x35, 0x2f, 0x06, 0x68, 0xb2, 0x0d, 0x07, 0xed, 0x4f, 0x36, 0xa9, 0x45, 0xa9, 0xd9, 0x6d, 0xe0,
+	0x5d, 0xf1, 0xff, 0x96, 0xb3, 0xe1, 0xa0, 0xef, 0xa1, 0x6e, 0x09, 0x8d, 0x16, 0x08, 0x3a, 0xdf,
+	0x35, 0xde, 0xa7, 0x57, 0xa0, 0x74, 0xb2, 0xed, 0xc6, 0xeb, 0xb3, 0x35, 0xe7, 0xcf, 0xb3, 0x35,
+	0xe7, 0xef, 0xb3, 0x35, 0xa7, 0x5b, 0x56, 0x7d, 0xff, 0xd9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0xe1, 0xef, 0xcc, 0xf5, 0x6f, 0x0d, 0x00, 0x00,
 }

+ 7 - 8
vendor/github.com/moby/buildkit/api/services/control/control.proto

@@ -2,9 +2,13 @@ syntax = "proto3";
 
 package moby.buildkit.v1;
 
+// The control API is currently considered experimental and may break in a backwards
+// incompatible way.
+
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/moby/buildkit/solver/pb/ops.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
 
 option (gogoproto.sizer_all) = true;
 option (gogoproto.marshaler_all) = true;
@@ -17,6 +21,7 @@ service Control {
 	rpc Status(StatusRequest) returns (stream StatusResponse);
 	rpc Session(stream BytesMessage) returns (stream BytesMessage);
 	rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
+	// rpc Info(InfoRequest) returns (InfoResponse);
 }
 
 message PruneRequest {
@@ -112,11 +117,5 @@ message ListWorkersRequest {
 }
 
 message ListWorkersResponse {
-	repeated WorkerRecord record = 1;
-}
-
-message WorkerRecord {
-	string ID = 1;
-	map<string, string> Labels = 2;
-	repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
-}
+	repeated moby.buildkit.v1.types.WorkerRecord record = 1;
+}
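
For downstream consumers of the control API, the proto change above means WorkerRecord now comes from the shared api/types package instead of being defined in the control package; the field set is unchanged. A minimal Go sketch of what that looks like at a call site; the OS/Architecture fields on pb.Platform are assumed from the OCI platform spec rather than shown in this diff:

package example

import (
	"fmt"

	controlapi "github.com/moby/buildkit/api/services/control"
	apitypes "github.com/moby/buildkit/api/types"
)

// printWorkers sketches how a ListWorkers caller now sees the WorkerRecord
// type coming from the shared api/types package rather than the control
// package; the fields (ID, Labels, Platforms) are the same as before.
func printWorkers(resp *controlapi.ListWorkersResponse) {
	for _, rec := range resp.Record {
		var r *apitypes.WorkerRecord = rec // relocated type, same shape
		fmt.Println(r.ID, r.Labels)
		for _, p := range r.Platforms {
			// OS/Architecture are assumed pb.Platform fields (OCI platform spec).
			fmt.Printf("\tplatform: %s/%s\n", p.OS, p.Architecture)
		}
	}
}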

+ 3 - 0
vendor/github.com/moby/buildkit/api/types/generate.go

@@ -0,0 +1,3 @@
+package moby_buildkit_v1_types
+
+//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto

+ 523 - 0
vendor/github.com/moby/buildkit/api/types/worker.pb.go

@@ -0,0 +1,523 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: worker.proto
+
+/*
+	Package moby_buildkit_v1_types is a generated protocol buffer package.
+
+	It is generated from these files:
+		worker.proto
+
+	It has these top-level messages:
+		WorkerRecord
+*/
+package moby_buildkit_v1_types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import pb "github.com/moby/buildkit/solver/pb"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type WorkerRecord struct {
+	ID        string            `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Labels    map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Platforms []pb.Platform     `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
+}
+
+func (m *WorkerRecord) Reset()                    { *m = WorkerRecord{} }
+func (m *WorkerRecord) String() string            { return proto.CompactTextString(m) }
+func (*WorkerRecord) ProtoMessage()               {}
+func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{0} }
+
+func (m *WorkerRecord) GetID() string {
+	if m != nil {
+		return m.ID
+	}
+	return ""
+}
+
+func (m *WorkerRecord) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func (m *WorkerRecord) GetPlatforms() []pb.Platform {
+	if m != nil {
+		return m.Platforms
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
+}
+func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintWorker(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v)))
+			i = encodeVarintWorker(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintWorker(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintWorker(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.Platforms) > 0 {
+		for _, msg := range m.Platforms {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintWorker(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func encodeVarintWorker(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *WorkerRecord) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovWorker(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v)))
+			n += mapEntrySize + 1 + sovWorker(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Platforms) > 0 {
+		for _, e := range m.Platforms {
+			l = e.Size()
+			n += 1 + l + sovWorker(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovWorker(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozWorker(x uint64) (n int) {
+	return sovWorker(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWorker
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWorker
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthWorker
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWorker
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthWorker
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowWorker
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowWorker
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthWorker
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowWorker
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthWorker
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipWorker(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthWorker
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Labels[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWorker
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthWorker
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Platforms = append(m.Platforms, pb.Platform{})
+			if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWorker(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWorker
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipWorker(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowWorker
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowWorker
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowWorker
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthWorker
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowWorker
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipWorker(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthWorker = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowWorker   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("worker.proto", fileDescriptorWorker) }
+
+var fileDescriptorWorker = []byte{
+	// 273 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xf3, 0x40,
+	0x10, 0x86, 0xbf, 0x4d, 0x3e, 0x0b, 0xdd, 0x06, 0x91, 0x45, 0x24, 0xe4, 0x10, 0x8b, 0xa7, 0x1e,
+	0x74, 0xb6, 0xea, 0x45, 0x3d, 0x96, 0x0a, 0x16, 0x3c, 0x48, 0x2e, 0x9e, 0xb3, 0xed, 0x36, 0x86,
+	0x24, 0xce, 0xb2, 0xd9, 0x44, 0xf2, 0x0f, 0x7b, 0xf4, 0xe2, 0x55, 0x24, 0xbf, 0x44, 0xba, 0x89,
+	0x98, 0x83, 0xb7, 0x79, 0x87, 0x67, 0x1e, 0xde, 0xa1, 0xde, 0x1b, 0xea, 0x4c, 0x6a, 0x50, 0x1a,
+	0x0d, 0xb2, 0x93, 0x02, 0x45, 0x03, 0xa2, 0x4a, 0xf3, 0x4d, 0x96, 0x1a, 0xa8, 0x2f, 0xc1, 0x34,
+	0x4a, 0x96, 0xc1, 0x45, 0x92, 0x9a, 0x97, 0x4a, 0xc0, 0x1a, 0x0b, 0x9e, 0x60, 0x82, 0xdc, 0xe2,
+	0xa2, 0xda, 0xda, 0x64, 0x83, 0x9d, 0x3a, 0x4d, 0x70, 0x3e, 0xc0, 0xf7, 0x46, 0xfe, 0x63, 0xe4,
+	0x25, 0xe6, 0xb5, 0xd4, 0x5c, 0x09, 0x8e, 0xaa, 0xec, 0xe8, 0xb3, 0x0f, 0x42, 0xbd, 0x67, 0xdb,
+	0x22, 0x92, 0x6b, 0xd4, 0x1b, 0x76, 0x48, 0x9d, 0xd5, 0xd2, 0x27, 0x53, 0x32, 0x1b, 0x47, 0xce,
+	0x6a, 0xc9, 0x1e, 0xe8, 0xe8, 0x31, 0x16, 0x32, 0x2f, 0x7d, 0x67, 0xea, 0xce, 0x26, 0x57, 0x73,
+	0xf8, 0xbb, 0x26, 0x0c, 0x2d, 0xd0, 0x9d, 0xdc, 0xbf, 0x1a, 0xdd, 0x44, 0xfd, 0x3d, 0x9b, 0xd3,
+	0xb1, 0xca, 0x63, 0xb3, 0x45, 0x5d, 0x94, 0xbe, 0x6b, 0x65, 0x1e, 0x28, 0x01, 0x4f, 0xfd, 0x72,
+	0xf1, 0x7f, 0xf7, 0x79, 0xfa, 0x2f, 0xfa, 0x85, 0x82, 0x5b, 0x3a, 0x19, 0x88, 0xd8, 0x11, 0x75,
+	0x33, 0xd9, 0xf4, 0xdd, 0xf6, 0x23, 0x3b, 0xa6, 0x07, 0x75, 0x9c, 0x57, 0xd2, 0x77, 0xec, 0xae,
+	0x0b, 0x77, 0xce, 0x0d, 0x59, 0x78, 0xbb, 0x36, 0x24, 0xef, 0x6d, 0x48, 0xbe, 0xda, 0x90, 0x88,
+	0x91, 0x7d, 0xf6, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x5c, 0x8f, 0x26, 0x71, 0x01, 0x00,
+	0x00,
+}

+ 16 - 0
vendor/github.com/moby/buildkit/api/types/worker.proto

@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.types;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message WorkerRecord {
+	string ID = 1;
+	map<string, string> Labels = 2;
+	repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
+}

+ 1 - 1
vendor/github.com/moby/buildkit/cache/manager.go

@@ -225,7 +225,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOpti
 		if err != nil {
 			return nil, err
 		}
-		if err := parent.Finalize(ctx); err != nil {
+		if err := parent.Finalize(ctx, true); err != nil {
 			return nil, err
 		}
 		parentID = parent.ID()

+ 12 - 5
vendor/github.com/moby/buildkit/cache/refs.go

@@ -25,7 +25,7 @@ type Ref interface {
 type ImmutableRef interface {
 	Ref
 	Parent() ImmutableRef
-	Finalize(ctx context.Context) error // Make sure reference is flushed to driver
+	Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver
 	Clone() ImmutableRef
 }
 
@@ -148,7 +148,7 @@ func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mount
 		return setReadonly(m), nil
 	}
 
-	if err := cr.finalize(ctx); err != nil {
+	if err := cr.finalize(ctx, true); err != nil {
 		return nil, err
 	}
 	if cr.viewMount == nil { // TODO: handle this better
@@ -233,22 +233,29 @@ func (sr *immutableRef) release(ctx context.Context) error {
 	return nil
 }
 
-func (sr *immutableRef) Finalize(ctx context.Context) error {
+func (sr *immutableRef) Finalize(ctx context.Context, b bool) error {
 	sr.mu.Lock()
 	defer sr.mu.Unlock()
 
-	return sr.finalize(ctx)
+	return sr.finalize(ctx, b)
 }
 
 func (cr *cacheRecord) Metadata() *metadata.StorageItem {
 	return cr.md
 }
 
-func (cr *cacheRecord) finalize(ctx context.Context) error {
+func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error {
 	mutable := cr.equalMutable
 	if mutable == nil {
 		return nil
 	}
+	if !commit {
+		if HasCachePolicyRetain(mutable) {
+			CachePolicyRetain(mutable)
+			return mutable.Metadata().Commit()
+		}
+		return nil
+	}
 	err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
 	if err != nil {
 		return errors.Wrapf(err, "failed to commit %s", mutable.ID())
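
The signature change above threads a commit flag through ImmutableRef.Finalize; the call sites touched in this vendor bump (cache/manager.go and the builder's image exporter) all pass true. A minimal sketch of a caller under the new signature; finalizeRef is a hypothetical helper, not code from this commit:

package example

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/pkg/errors"
)

// finalizeRef illustrates the new two-argument Finalize: passing true flushes
// and commits the underlying snapshot (as the image exporter and cache manager
// above now do), while passing false only persists retain-policy metadata for
// the equal mutable record without committing it.
func finalizeRef(ctx context.Context, ref cache.ImmutableRef, commit bool) error {
	if err := ref.Finalize(ctx, commit); err != nil {
		return errors.Wrap(err, "failed to finalize ref")
	}
	return nil
}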

+ 41 - 56
vendor/github.com/moby/buildkit/cache/remotecache/export.go

@@ -10,35 +10,54 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
-	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/progress"
-	"github.com/moby/buildkit/util/push"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-type ExporterOpt struct {
-	SessionManager *session.Manager
+type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (Exporter, error)
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := progress.FromContext(ctx)
+	now := time.Now()
+	st := progress.Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
 }
 
-func NewCacheExporter(opt ExporterOpt) *CacheExporter {
-	return &CacheExporter{opt: opt}
+type Exporter interface {
+	solver.CacheExporterTarget
+	Finalize(ctx context.Context) error
 }
 
-type CacheExporter struct {
-	opt ExporterOpt
+type contentCacheExporter struct {
+	solver.CacheExporterTarget
+	chains   *v1.CacheChains
+	ingester content.Ingester
 }
 
-func (ce *CacheExporter) ExporterForTarget(target string) *RegistryCacheExporter {
+func NewExporter(ingester content.Ingester) Exporter {
 	cc := v1.NewCacheChains()
-	return &RegistryCacheExporter{target: target, CacheExporterTarget: cc, chains: cc, exporter: ce}
+	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester}
 }
 
-func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, target string) error {
+func (ce *contentCacheExporter) Finalize(ctx context.Context) error {
+	return export(ctx, ce.ingester, ce.chains)
+}
+
+func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) error {
 	config, descs, err := cc.Marshal()
 	if err != nil {
 		return err
@@ -58,19 +77,16 @@ func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, targe
 	mfst.SchemaVersion = 2
 	mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
 
-	allBlobs := map[digest.Digest]struct{}{}
-	mp := contentutil.NewMultiProvider(nil)
 	for _, l := range config.Layers {
-		if _, ok := allBlobs[l.Blob]; ok {
-			continue
-		}
 		dgstPair, ok := descs[l.Blob]
 		if !ok {
 			return errors.Errorf("missing blob %s", l.Blob)
 		}
-		allBlobs[l.Blob] = struct{}{}
-		mp.Add(l.Blob, dgstPair.Provider)
-
+		layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+		if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil {
+			return layerDone(errors.Wrap(err, "error writing layer blob"))
+		}
+		layerDone(nil)
 		mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
 	}
 
@@ -85,13 +101,11 @@ func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, targe
 		MediaType: v1.CacheConfigMediaTypeV0,
 	}
 	configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
-	buf := contentutil.NewBuffer()
-	if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
 		return configDone(errors.Wrap(err, "error writing config blob"))
 	}
 	configDone(nil)
 
-	mp.Add(dgst, buf)
 	mfst.Manifests = append(mfst.Manifests, desc)
 
 	dt, err = json.Marshal(mfst)
@@ -100,44 +114,15 @@ func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, targe
 	}
 	dgst = digest.FromBytes(dt)
 
-	buf = contentutil.NewBuffer()
 	desc = ocispec.Descriptor{
-		Digest: dgst,
-		Size:   int64(len(dt)),
+		Digest:    dgst,
+		Size:      int64(len(dt)),
+		MediaType: mfst.MediaType,
 	}
 	mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
-	if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
 		return mfstDone(errors.Wrap(err, "error writing manifest blob"))
 	}
 	mfstDone(nil)
-	mp.Add(dgst, buf)
-
-	return push.Push(ctx, ce.opt.SessionManager, mp, dgst, target, false)
-}
-
-type RegistryCacheExporter struct {
-	solver.CacheExporterTarget
-	chains   *v1.CacheChains
-	target   string
-	exporter *CacheExporter
-}
-
-func (ce *RegistryCacheExporter) Finalize(ctx context.Context) error {
-	return ce.exporter.Finalize(ctx, ce.chains, ce.target)
-}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.FromContext(ctx)
-	now := time.Now()
-	st := progress.Status{
-		Started: &now,
-	}
-	pw.Write(id, st)
-	return func(err error) error {
-		now := time.Now()
-		st.Completed = &now
-		pw.Write(id, st)
-		pw.Close()
-		return err
-	}
+	return nil
 }
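
With this rewrite the export path is a plain content.Ingester sink behind a ResolveCacheExporterFunc hook, mirroring the import side. A minimal sketch of how the controller-side TODO above could eventually be filled in, assuming a hypothetical local content store; this is illustration only, not what the commit wires up:

package registryexample

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/cache/remotecache"
)

// newLocalCacheExporter sketches the export-side counterpart: a
// ResolveCacheExporterFunc backed by a local content store. The commit itself
// only wires the import side (see the TODO in controller.go above), so the
// store argument here is an assumption for illustration.
func newLocalCacheExporter(store content.Ingester) remotecache.ResolveCacheExporterFunc {
	return func(ctx context.Context, typ, target string) (remotecache.Exporter, error) {
		// The returned Exporter collects cache records via its embedded
		// solver.CacheExporterTarget and writes the config blob, layer blobs
		// and manifest list into the ingester when Finalize is called.
		return remotecache.NewExporter(store), nil
	}
}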

+ 38 - 64
vendor/github.com/moby/buildkit/cache/remotecache/import.go

@@ -3,77 +3,34 @@ package remotecache
 import (
 	"context"
 	"encoding/json"
-	"net/http"
-	"time"
+	"io"
 
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/remotes"
-	"github.com/containerd/containerd/remotes/docker"
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
-	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/session/auth"
 	"github.com/moby/buildkit/solver"
-	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/worker"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-type ImportOpt struct {
-	SessionManager *session.Manager
-	Worker         worker.Worker // TODO: remove. This sets the worker where the cache is imported to. Should be passed on load instead.
-}
+// ResolveCacheImporterFunc returns importer and descriptor.
+// Currently typ needs to be an empty string.
+type ResolveCacheImporterFunc func(ctx context.Context, typ, ref string) (Importer, ocispec.Descriptor, error)
 
-func NewCacheImporter(opt ImportOpt) *CacheImporter {
-	return &CacheImporter{opt: opt}
+type Importer interface {
+	Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error)
 }
 
-type CacheImporter struct {
-	opt ImportOpt
+func NewImporter(provider content.Provider) Importer {
+	return &contentCacheImporter{provider: provider}
 }
 
-func (ci *CacheImporter) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
-	id := session.FromContext(ctx)
-	if id == "" {
-		return nil
-	}
-
-	return func(host string) (string, string, error) {
-		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-		defer cancel()
-
-		caller, err := ci.opt.SessionManager.Get(timeoutCtx, id)
-		if err != nil {
-			return "", "", err
-		}
-
-		return auth.CredentialsFunc(context.TODO(), caller)(host)
-	}
+type contentCacheImporter struct {
+	provider content.Provider
 }
 
-func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheManager, error) {
-	resolver := docker.NewResolver(docker.ResolverOptions{
-		Client:      http.DefaultClient,
-		Credentials: ci.getCredentialsFromSession(ctx),
-	})
-
-	ref, desc, err := resolver.Resolve(ctx, ref)
-	if err != nil {
-		return nil, err
-	}
-
-	fetcher, err := resolver.Fetcher(ctx, ref)
-	if err != nil {
-		return nil, err
-	}
-
-	b := contentutil.NewBuffer()
-
-	if _, err := remotes.FetchHandler(b, fetcher)(ctx, desc); err != nil {
-		return nil, err
-	}
-
-	dt, err := content.ReadBlob(ctx, b, desc)
+func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+	dt, err := readBlob(ctx, ci.provider, desc)
 	if err != nil {
 		return nil, err
 	}
@@ -94,19 +51,15 @@ func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheM
 		}
 		allLayers[m.Digest] = v1.DescriptorProviderPair{
 			Descriptor: m,
-			Provider:   contentutil.FromFetcher(fetcher, m),
+			Provider:   ci.provider,
 		}
 	}
 
 	if configDesc.Digest == "" {
-		return nil, errors.Errorf("invalid build cache from %s", ref)
-	}
-
-	if _, err := remotes.FetchHandler(b, fetcher)(ctx, configDesc); err != nil {
-		return nil, err
+		return nil, errors.Errorf("invalid build cache from %+v", desc)
 	}
 
-	dt, err = content.ReadBlob(ctx, b, configDesc)
+	dt, err = readBlob(ctx, ci.provider, configDesc)
 	if err != nil {
 		return nil, err
 	}
@@ -116,9 +69,30 @@ func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheM
 		return nil, err
 	}
 
-	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, ci.opt.Worker)
+	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
 	if err != nil {
 		return nil, err
 	}
-	return solver.NewCacheManager(ref, keysStorage, resultStorage), nil
+	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+}
+
+func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) {
+	maxBlobSize := int64(1 << 20)
+	if desc.Size > maxBlobSize {
+		return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize)
+	}
+	dt, err := content.ReadBlob(ctx, provider, desc)
+	if err != nil {
+		// NOTE: even if err == EOF, we might have got expected dt here.
+		// For instance, http.Response.Body is known to return non-zero bytes with EOF.
+		if err == io.EOF {
+			if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest {
+				err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)",
+					desc.Digest, desc.Size, dtDigest, len(dt))
+			} else {
+				err = nil
+			}
+		}
+	}
+	return dt, err
 }
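
The import side is symmetric: any content.Provider plus the descriptor of the cache manifest list is enough to build a solver.CacheManager. A short sketch using the signatures introduced above (the id argument is just the name the resulting cache manager reports):

    // Sketch: turn a previously exported cache manifest list into an extra
    // cache source for the solver.
    func importFromProvider(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, w worker.Worker) (solver.CacheManager, error) {
        imp := remotecache.NewImporter(provider)
        return imp.Resolve(ctx, desc, desc.Digest.String(), w)
    }
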

+ 73 - 0
vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go

@@ -0,0 +1,73 @@
+package registry
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
+	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/auth"
+	"github.com/moby/buildkit/util/contentutil"
+	"github.com/moby/buildkit/util/tracing"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
+	return func(ctx context.Context, typ, ref string) (remotecache.Exporter, error) {
+		if typ != "" {
+			return nil, errors.Errorf("unsupported cache exporter type: %s", typ)
+		}
+		remote := newRemoteResolver(ctx, sm)
+		pusher, err := remote.Pusher(ctx, ref)
+		if err != nil {
+			return nil, err
+		}
+		return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil
+	}
+}
+
+func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, typ, ref string) (remotecache.Importer, specs.Descriptor, error) {
+		if typ != "" {
+			return nil, specs.Descriptor{}, errors.Errorf("unsupported cache importer type: %s", typ)
+		}
+		remote := newRemoteResolver(ctx, sm)
+		xref, desc, err := remote.Resolve(ctx, ref)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		fetcher, err := remote.Fetcher(ctx, xref)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		return remotecache.NewImporter(contentutil.FromFetcher(fetcher)), desc, nil
+	}
+}
+
+func newRemoteResolver(ctx context.Context, sm *session.Manager) remotes.Resolver {
+	return docker.NewResolver(docker.ResolverOptions{
+		Client:      tracing.DefaultClient,
+		Credentials: getCredentialsFunc(ctx, sm),
+	})
+}
+
+func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
+	id := session.FromContext(ctx)
+	if id == "" {
+		return nil
+	}
+	return func(host string) (string, string, error) {
+		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		caller, err := sm.Get(timeoutCtx, id)
+		if err != nil {
+			return "", "", err
+		}
+
+		return auth.CredentialsFunc(context.TODO(), caller)(host)
+	}
+}
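
Callers now resolve a registry-backed importer per ref instead of holding a long-lived CacheImporter. A hedged usage sketch of the functions above (sm is a *session.Manager, w a worker.Worker; the type string must be empty for now, as noted in import.go):

    func importRegistryCache(ctx context.Context, sm *session.Manager, w worker.Worker, ref string) (solver.CacheManager, error) {
        resolve := registry.ResolveCacheImporterFunc(sm)
        imp, desc, err := resolve(ctx, "", ref) // typ must be "" until other cache types exist
        if err != nil {
            return nil, err
        }
        return imp.Resolve(ctx, desc, ref, w)
    }
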

+ 9 - 0
vendor/github.com/moby/buildkit/client/llb/source.go

@@ -51,6 +51,15 @@ func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb
 		return "", nil, nil, err
 	}
 
+	if strings.HasPrefix(s.id, "local://") {
+		if _, hasSession := s.attrs[pb.AttrLocalSessionID]; !hasSession {
+			uid := s.constraints.LocalUniqueID
+			if uid == "" {
+				uid = constraints.LocalUniqueID
+			}
+			s.attrs[pb.AttrLocalUniqueID] = uid
+		}
+	}
 	proto, md := MarshalConstraints(constraints, &s.constraints)
 
 	proto.Op = &pb.Op_Source{

+ 10 - 1
vendor/github.com/moby/buildkit/client/llb/state.go

@@ -4,6 +4,7 @@ import (
 	"context"
 
 	"github.com/containerd/containerd/platforms"
+	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/system"
 	digest "github.com/opencontainers/go-digest"
@@ -78,7 +79,8 @@ func (s State) Marshal(co ...ConstraintsOpt) (*Definition, error) {
 
 	defaultPlatform := platforms.Normalize(platforms.DefaultSpec())
 	c := &Constraints{
-		Platform: &defaultPlatform,
+		Platform:      &defaultPlatform,
+		LocalUniqueID: identity.NewID(),
 	}
 	for _, o := range append(s.opts, co...) {
 		o.SetConstraintsOption(c)
@@ -358,6 +360,7 @@ type Constraints struct {
 	Platform          *specs.Platform
 	WorkerConstraints []string
 	Metadata          pb.OpMetadata
+	LocalUniqueID     string
 }
 
 func Platform(p specs.Platform) ConstraintsOpt {
@@ -366,6 +369,12 @@ func Platform(p specs.Platform) ConstraintsOpt {
 	})
 }
 
+func LocalUniqueID(v string) ConstraintsOpt {
+	return constraintsOptFunc(func(c *Constraints) {
+		c.LocalUniqueID = v
+	})
+}
+
 var (
 	LinuxAmd64   = Platform(specs.Platform{OS: "linux", Architecture: "amd64"})
 	LinuxArmhf   = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
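
LocalUniqueID gives local sources a stable identity when no session attribute is set, presumably so that identical definitions marshalled in separate processes hash (and therefore cache) the same instead of picking up the random identity.NewID() default that Marshal now generates. A hedged sketch:

    func marshalStableLocal() (*llb.Definition, error) {
        // without llb.SessionID the unique-ID attribute is applied, per the
        // source.go change above
        st := llb.Local("src")
        return st.Marshal(llb.LocalUniqueID("stable-build-1"))
    }
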

+ 1 - 15
vendor/github.com/moby/buildkit/client/workers.go

@@ -33,7 +33,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
 		wi = append(wi, &WorkerInfo{
 			ID:        w.ID,
 			Labels:    w.Labels,
-			Platforms: toClientPlatforms(w.Platforms),
+			Platforms: pb.ToSpecPlatforms(w.Platforms),
 		})
 	}
 
@@ -51,17 +51,3 @@ func WithWorkerFilter(f []string) ListWorkersOption {
 		wi.Filter = f
 	}
 }
-
-func toClientPlatforms(p []pb.Platform) []specs.Platform {
-	out := make([]specs.Platform, 0, len(p))
-	for _, pp := range p {
-		out = append(out, specs.Platform{
-			OS:           pp.OS,
-			Architecture: pp.Architecture,
-			Variant:      pp.Variant,
-			OSVersion:    pp.OSVersion,
-			OSFeatures:   pp.OSFeatures,
-		})
-	}
-	return out
-}

+ 20 - 28
vendor/github.com/moby/buildkit/control/control.go

@@ -5,6 +5,7 @@ import (
 
 	"github.com/docker/distribution/reference"
 	controlapi "github.com/moby/buildkit/api/services/control"
+	apitypes "github.com/moby/buildkit/api/types"
 	"github.com/moby/buildkit/cache/remotecache"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter"
@@ -15,20 +16,21 @@ import (
 	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/worker"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 )
 
+type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (remotecache.Exporter, error)
+
 type Opt struct {
-	SessionManager   *session.Manager
-	WorkerController *worker.Controller
-	Frontends        map[string]frontend.Frontend
-	CacheKeyStorage  solver.CacheKeyStorage
-	CacheExporter    *remotecache.CacheExporter
-	CacheImporter    *remotecache.CacheImporter
+	SessionManager           *session.Manager
+	WorkerController         *worker.Controller
+	Frontends                map[string]frontend.Frontend
+	CacheKeyStorage          solver.CacheKeyStorage
+	ResolveCacheExporterFunc remotecache.ResolveCacheExporterFunc
+	ResolveCacheImporterFunc remotecache.ResolveCacheImporterFunc
 }
 
 type Controller struct { // TODO: ControlService
@@ -37,7 +39,7 @@ type Controller struct { // TODO: ControlService
 }
 
 func NewController(opt Opt) (*Controller, error) {
-	solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.CacheImporter)
+	solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.ResolveCacheImporterFunc)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create solver")
 	}
@@ -103,7 +105,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
 		}(w)
 	}
 
-	eg2, ctx := errgroup.WithContext(stream.Context())
+	eg2, _ := errgroup.WithContext(stream.Context())
 
 	eg2.Go(func() error {
 		defer close(ch)
@@ -154,14 +156,18 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		}
 	}
 
-	var cacheExporter *remotecache.RegistryCacheExporter
-	if ref := req.Cache.ExportRef; ref != "" {
+	var cacheExporter remotecache.Exporter
+	if ref := req.Cache.ExportRef; ref != "" && c.opt.ResolveCacheExporterFunc != nil {
 		parsed, err := reference.ParseNormalizedNamed(ref)
 		if err != nil {
 			return nil, err
 		}
 		exportCacheRef := reference.TagNameOnly(parsed).String()
-		cacheExporter = c.opt.CacheExporter.ExporterForTarget(exportCacheRef)
+		typ := "" // unimplemented yet (typically registry)
+		cacheExporter, err = c.opt.ResolveCacheExporterFunc(ctx, typ, exportCacheRef)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	var importCacheRefs []string
@@ -269,10 +275,10 @@ func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersR
 		return nil, err
 	}
 	for _, w := range workers {
-		resp.Record = append(resp.Record, &controlapi.WorkerRecord{
+		resp.Record = append(resp.Record, &apitypes.WorkerRecord{
 			ID:        w.ID(),
 			Labels:    w.Labels(),
-			Platforms: toPBPlatforms(w.Platforms()),
+			Platforms: pb.PlatformsFromSpec(w.Platforms()),
 		})
 	}
 	return resp, nil
@@ -296,17 +302,3 @@ func parseCacheExporterOpt(opt map[string]string) solver.CacheExportMode {
 	}
 	return solver.CacheExportModeMin
 }
-
-func toPBPlatforms(p []specs.Platform) []pb.Platform {
-	out := make([]pb.Platform, 0, len(p))
-	for _, pp := range p {
-		out = append(out, pb.Platform{
-			OS:           pp.OS,
-			Architecture: pp.Architecture,
-			Variant:      pp.Variant,
-			OSVersion:    pp.OSVersion,
-			OSFeatures:   pp.OSFeatures,
-		})
-	}
-	return out
-}
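
A sketch of the corresponding daemon wiring with the new Opt fields, using the registry package added in this change (sm, wc, frontends and cacheStorage stand in for the daemon's existing values):

    func newController(sm *session.Manager, wc *worker.Controller, frontends map[string]frontend.Frontend, cacheStorage solver.CacheKeyStorage) (*control.Controller, error) {
        return control.NewController(control.Opt{
            SessionManager:           sm,
            WorkerController:         wc,
            Frontends:                frontends,
            CacheKeyStorage:          cacheStorage,
            ResolveCacheExporterFunc: registry.ResolveCacheExporterFunc(sm),
            ResolveCacheImporterFunc: registry.ResolveCacheImporterFunc(sm),
        })
    }
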

+ 7 - 7
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go

@@ -21,7 +21,7 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
 	"github.com/moby/buildkit/identity"
-	"github.com/moby/buildkit/util/libcontainer_specconv"
+	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
 	"github.com/moby/buildkit/util/system"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -84,6 +84,8 @@ func New(opt Opt) (executor.Executor, error) {
 		LogFormat:    runc.JSON,
 		PdeathSignal: syscall.SIGKILL,
 		Setpgid:      true,
+		// we don't execute runc with --rootless=(true|false) explicitly,
+		// so as to support non-runc runtimes
 	}
 
 	w := &runcExecutor{
@@ -169,13 +171,11 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
 		return errors.Wrapf(err, "failed to create working directory %s", newp)
 	}
 
+	if err := setOOMScoreAdj(spec); err != nil {
+		return err
+	}
 	if w.rootless {
-		specconv.ToRootless(spec, nil)
-		// TODO(AkihiroSuda): keep Cgroups enabled if /sys/fs/cgroup/cpuset/buildkit exists and writable
-		spec.Linux.CgroupsPath = ""
-		// TODO(AkihiroSuda): ToRootless removes netns, but we should readd netns here
-		// if either SUID or userspace NAT is configured on the host.
-		if err := setOOMScoreAdj(spec); err != nil {
+		if err := rootlessspecconv.ToRootless(spec); err != nil {
 			return err
 		}
 	}

+ 53 - 35
vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go

@@ -36,17 +36,21 @@ const (
 var httpPrefix = regexp.MustCompile("^https?://")
 var gitUrlPathWithFragmentSuffix = regexp.MustCompile("\\.git(?:#.+)?$")
 
-func Build(ctx context.Context, c client.Client) error {
-	opts := c.Opts()
+func Build(ctx context.Context, c client.Client) (*client.Result, error) {
+	opts := c.BuildOpts().Opts
 
-	// TODO: read buildPlatforms from workers
-	buildPlatforms := []specs.Platform{platforms.DefaultSpec()}
+	defaultBuildPlatform := platforms.DefaultSpec()
+	if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 {
+		defaultBuildPlatform = workers[0].Platforms[0]
+	}
+
+	buildPlatforms := []specs.Platform{defaultBuildPlatform}
 	targetPlatform := platforms.DefaultSpec()
 	if v := opts[keyTargetPlatform]; v != "" {
 		var err error
 		targetPlatform, err = platforms.Parse(v)
 		if err != nil {
-			return errors.Wrapf(err, "failed to parse target platform %s", v)
+			return nil, errors.Wrapf(err, "failed to parse target platform %s", v)
 		}
 	}
 
@@ -66,7 +70,7 @@ func Build(ctx context.Context, c client.Client) error {
 
 	src := llb.Local(LocalNameDockerfile,
 		llb.IncludePatterns([]string{filename}),
-		llb.SessionID(c.SessionID()),
+		llb.SessionID(c.BuildOpts().SessionID),
 		llb.SharedKeyHint(defaultDockerfileName),
 	)
 	var buildContext *llb.State
@@ -78,13 +82,18 @@ func Build(ctx context.Context, c client.Client) error {
 		httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"))
 		def, err := httpContext.Marshal()
 		if err != nil {
-			return err
+			return nil, errors.Wrapf(err, "failed to marshal httpcontext")
 		}
-		ref, err := c.Solve(ctx, client.SolveRequest{
+		res, err := c.Solve(ctx, client.SolveRequest{
 			Definition: def.ToPB(),
-		}, nil, false)
+		})
 		if err != nil {
-			return err
+			return nil, errors.Wrapf(err, "failed to resolve httpcontext")
+		}
+
+		ref, err := res.SingleRef()
+		if err != nil {
+			return nil, err
 		}
 
 		dt, err := ref.ReadFile(ctx, client.ReadRequest{
@@ -94,7 +103,7 @@ func Build(ctx context.Context, c client.Client) error {
 			},
 		})
 		if err != nil {
-			return err
+			return nil, errors.Errorf("failed to read downloaded context")
 		}
 		if isArchive(dt) {
 			unpack := llb.Image(dockerfile2llb.CopyImage).
@@ -112,15 +121,20 @@ func Build(ctx context.Context, c client.Client) error {
 
 	def, err := src.Marshal()
 	if err != nil {
-		return err
+		return nil, errors.Wrapf(err, "failed to marshal local source")
 	}
 
 	eg, ctx2 := errgroup.WithContext(ctx)
 	var dtDockerfile []byte
 	eg.Go(func() error {
-		ref, err := c.Solve(ctx2, client.SolveRequest{
+		res, err := c.Solve(ctx2, client.SolveRequest{
 			Definition: def.ToPB(),
-		}, nil, false)
+		})
+		if err != nil {
+			return errors.Wrapf(err, "failed to resolve dockerfile")
+		}
+
+		ref, err := res.SingleRef()
 		if err != nil {
 			return err
 		}
@@ -129,7 +143,7 @@ func Build(ctx context.Context, c client.Client) error {
 			Filename: filename,
 		})
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "failed to read dockerfile")
 		}
 		return nil
 	})
@@ -139,7 +153,7 @@ func Build(ctx context.Context, c client.Client) error {
 			dockerignoreState := buildContext
 			if dockerignoreState == nil {
 				st := llb.Local(LocalNameContext,
-					llb.SessionID(c.SessionID()),
+					llb.SessionID(c.BuildOpts().SessionID),
 					llb.IncludePatterns([]string{dockerignoreFilename}),
 					llb.SharedKeyHint(dockerignoreFilename),
 				)
@@ -149,9 +163,13 @@ func Build(ctx context.Context, c client.Client) error {
 			if err != nil {
 				return err
 			}
-			ref, err := c.Solve(ctx2, client.SolveRequest{
+			res, err := c.Solve(ctx2, client.SolveRequest{
 				Definition: def.ToPB(),
-			}, nil, false)
+			})
+			if err != nil {
+				return err
+			}
+			ref, err := res.SingleRef()
 			if err != nil {
 				return err
 			}
@@ -169,10 +187,10 @@ func Build(ctx context.Context, c client.Client) error {
 	}
 
 	if err := eg.Wait(); err != nil {
-		return err
+		return nil, err
 	}
 
-	if _, ok := c.Opts()["cmdline"]; !ok {
+	if _, ok := opts["cmdline"]; !ok {
 		ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
 		if ok {
 			return forwardGateway(ctx, c, ref, cmdline)
@@ -184,7 +202,7 @@ func Build(ctx context.Context, c client.Client) error {
 		MetaResolver:   c,
 		BuildArgs:      filter(opts, buildArgPrefix),
 		Labels:         filter(opts, labelPrefix),
-		SessionID:      c.SessionID(),
+		SessionID:      c.BuildOpts().SessionID,
 		BuildContext:   buildContext,
 		Excludes:       excludes,
 		IgnoreCache:    ignoreCache,
@@ -193,17 +211,17 @@ func Build(ctx context.Context, c client.Client) error {
 	})
 
 	if err != nil {
-		return err
+		return nil, errors.Wrapf(err, "failed to create LLB definition")
 	}
 
 	def, err = st.Marshal()
 	if err != nil {
-		return err
+		return nil, errors.Wrapf(err, "failed to marshal LLB definition")
 	}
 
 	config, err := json.Marshal(img)
 	if err != nil {
-		return err
+		return nil, errors.Wrapf(err, "failed to marshal image config")
 	}
 
 	var cacheFrom []string
@@ -211,30 +229,30 @@ func Build(ctx context.Context, c client.Client) error {
 		cacheFrom = strings.Split(cacheFromStr, ",")
 	}
 
-	_, err = c.Solve(ctx, client.SolveRequest{
+	res, err := c.Solve(ctx, client.SolveRequest{
 		Definition:      def.ToPB(),
 		ImportCacheRefs: cacheFrom,
-	}, map[string][]byte{
-		exporterImageConfig: config,
-	}, true)
+	})
 	if err != nil {
-		return err
+		return nil, err
 	}
-	return nil
+
+	res.AddMeta(exporterImageConfig, config)
+
+	return res, nil
 }
 
-func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) error {
-	opts := c.Opts()
+func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) {
+	opts := c.BuildOpts().Opts
 	if opts == nil {
 		opts = map[string]string{}
 	}
 	opts["cmdline"] = cmdline
 	opts["source"] = ref
-	_, err := c.Solve(ctx, client.SolveRequest{
+	return c.Solve(ctx, client.SolveRequest{
 		Frontend:    "gateway.v0",
 		FrontendOpt: opts,
-	}, nil, true)
-	return err
+	})
 }
 
 func filter(opt map[string]string, key string) map[string]string {

+ 0 - 41
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go

@@ -1,41 +0,0 @@
-package dockerfile
-
-import (
-	"context"
-
-	"github.com/moby/buildkit/frontend"
-	"github.com/moby/buildkit/frontend/dockerfile/builder"
-	"github.com/moby/buildkit/solver"
-)
-
-func NewDockerfileFrontend() frontend.Frontend {
-	return &dfFrontend{}
-}
-
-type dfFrontend struct{}
-
-func (f *dfFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
-
-	c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	defer func() {
-		for _, r := range c.refs {
-			if r != nil && (c.final != r || retErr != nil) {
-				r.Release(context.TODO())
-			}
-		}
-	}()
-
-	if err := builder.Build(ctx, c); err != nil {
-		return nil, nil, err
-	}
-
-	if c.final == nil || c.final.CachedResult == nil {
-		return nil, c.exporterAttr, nil
-	}
-
-	return c.final, c.exporterAttr, nil
-}

+ 24 - 29
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go

@@ -80,8 +80,9 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 		return nil, nil, err
 	}
 
-	for i := range metaArgs {
-		metaArgs[i] = setBuildArgValue(metaArgs[i], opt.BuildArgs)
+	optMetaArgs := []instructions.KeyValuePairOptional{}
+	for _, metaArg := range metaArgs {
+		optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs))
 	}
 
 	shlex := shell.NewLex(dockerfile.EscapeToken)
@@ -95,7 +96,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 
 	// set base state for every image
 	for _, st := range stages {
-		name, err := shlex.ProcessWord(st.BaseName, toEnvList(metaArgs, nil))
+		name, err := shlex.ProcessWord(st.BaseName, toEnvList(optMetaArgs, nil))
 		if err != nil {
 			return nil, nil, err
 		}
@@ -111,7 +112,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 		}
 
 		if v := st.Platform; v != "" {
-			v, err := shlex.ProcessWord(v, toEnvList(metaArgs, nil))
+			v, err := shlex.ProcessWord(v, toEnvList(optMetaArgs, nil))
 			if err != nil {
 				return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
 			}
@@ -268,7 +269,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 
 		opt := dispatchOpt{
 			allDispatchStates: allDispatchStates,
-			metaArgs:          metaArgs,
+			metaArgs:          optMetaArgs,
 			buildArgValues:    opt.BuildArgs,
 			shlex:             shlex,
 			sessionID:         opt.SessionID,
@@ -359,7 +360,7 @@ func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (comm
 
 type dispatchOpt struct {
 	allDispatchStates *dispatchStates
-	metaArgs          []instructions.ArgCommand
+	metaArgs          []instructions.KeyValuePairOptional
 	buildArgValues    map[string]string
 	shlex             *shell.Lex
 	sessionID         string
@@ -442,7 +443,7 @@ type dispatchState struct {
 	stage        instructions.Stage
 	base         *dispatchState
 	deps         map[*dispatchState]struct{}
-	buildArgs    []instructions.ArgCommand
+	buildArgs    []instructions.KeyValuePairOptional
 	commands     []command
 	ctxPaths     map[string]struct{}
 	ignoreCache  bool
@@ -538,7 +539,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
 	}
 	opt := []llb.RunOption{llb.Args(args)}
 	for _, arg := range d.buildArgs {
-		opt = append(opt, llb.AddEnv(arg.Key, getArgValue(arg)))
+		opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
 	}
 	opt = append(opt, dfCmd(c))
 	if d.ignoreCache {
@@ -770,20 +771,22 @@ func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error {
 	return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
 }
 
-func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.ArgCommand, buildArgValues map[string]string) error {
+func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error {
 	commitStr := "ARG " + c.Key
+	buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues)
+
 	if c.Value != nil {
 		commitStr += "=" + *c.Value
 	}
-	if c.Value == nil {
+	if buildArg.Value == nil {
 		for _, ma := range metaArgs {
-			if ma.Key == c.Key {
-				c.Value = ma.Value
+			if ma.Key == buildArg.Key {
+				buildArg.Value = ma.Value
 			}
 		}
 	}
 
-	d.buildArgs = append(d.buildArgs, setBuildArgValue(*c, buildArgValues))
+	d.buildArgs = append(d.buildArgs, buildArg)
 	return commitToHistory(&d.image, commitStr, false, nil)
 }
 
@@ -834,28 +837,20 @@ func addEnv(env []string, k, v string, override bool) []string {
 	return env
 }
 
-func setBuildArgValue(c instructions.ArgCommand, values map[string]string) instructions.ArgCommand {
-	if v, ok := values[c.Key]; ok {
-		c.Value = &v
+func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional {
+	if v, ok := values[kvpo.Key]; ok {
+		kvpo.Value = &v
 	}
-	return c
+	return kvpo
 }
 
-func toEnvList(args []instructions.ArgCommand, env []string) []string {
+func toEnvList(args []instructions.KeyValuePairOptional, env []string) []string {
 	for _, arg := range args {
-		env = addEnv(env, arg.Key, getArgValue(arg), false)
+		env = addEnv(env, arg.Key, arg.ValueString(), false)
 	}
 	return env
 }
 
-func getArgValue(arg instructions.ArgCommand) string {
-	v := ""
-	if arg.Value != nil {
-		v = *arg.Value
-	}
-	return v
-}
-
 func dfCmd(cmd interface{}) llb.ConstraintsOpt {
 	// TODO: add fmt.Stringer to instructions.Command to remove interface{}
 	var cmdStr string
@@ -870,10 +865,10 @@ func dfCmd(cmd interface{}) llb.ConstraintsOpt {
 	})
 }
 
-func runCommandString(args []string, buildArgs []instructions.ArgCommand) string {
+func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string {
 	var tmpBuildEnv []string
 	for _, arg := range buildArgs {
-		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+getArgValue(arg))
+		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString())
 	}
 	if len(tmpBuildEnv) > 0 {
 		tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)

+ 0 - 86
vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go

@@ -1,86 +0,0 @@
-package dockerfile
-
-import (
-	"context"
-
-	"github.com/moby/buildkit/cache"
-	"github.com/moby/buildkit/frontend"
-	"github.com/moby/buildkit/frontend/gateway/client"
-	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/solver"
-	"github.com/moby/buildkit/worker"
-	"github.com/pkg/errors"
-)
-
-func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*bridgeClient, error) {
-	return &bridgeClient{opts: opts, FrontendLLBBridge: llbBridge, sid: session.FromContext(ctx)}, nil
-}
-
-type bridgeClient struct {
-	frontend.FrontendLLBBridge
-	opts         map[string]string
-	final        *ref
-	sid          string
-	exporterAttr map[string][]byte
-	refs         []*ref
-}
-
-func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest, exporterAttr map[string][]byte, final bool) (client.Reference, error) {
-	r, exporterAttrRes, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
-		Definition:      req.Definition,
-		Frontend:        req.Frontend,
-		FrontendOpt:     req.FrontendOpt,
-		ImportCacheRefs: req.ImportCacheRefs,
-	})
-	if err != nil {
-		return nil, err
-	}
-	rr := &ref{r}
-	c.refs = append(c.refs, rr)
-	if final {
-		c.final = rr
-		if exporterAttr == nil {
-			exporterAttr = make(map[string][]byte)
-		}
-		for k, v := range exporterAttrRes {
-			exporterAttr[k] = v
-		}
-		c.exporterAttr = exporterAttr
-	}
-	return rr, nil
-}
-func (c *bridgeClient) Opts() map[string]string {
-	return c.opts
-}
-func (c *bridgeClient) SessionID() string {
-	return c.sid
-}
-
-type ref struct {
-	solver.CachedResult
-}
-
-func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
-	ref, err := r.getImmutableRef()
-	if err != nil {
-		return nil, err
-	}
-	newReq := cache.ReadRequest{
-		Filename: req.Filename,
-	}
-	if r := req.Range; r != nil {
-		newReq.Range = &cache.FileRange{
-			Offset: r.Offset,
-			Length: r.Length,
-		}
-	}
-	return cache.ReadFile(ctx, ref, newReq)
-}
-
-func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
-	ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
-	if !ok {
-		return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
-	}
-	return ref.ImmutableRef, nil
-}

+ 15 - 2
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go

@@ -18,6 +18,20 @@ func (kvp *KeyValuePair) String() string {
 	return kvp.Key + "=" + kvp.Value
 }
 
+// KeyValuePairOptional is the same as KeyValuePair but Value is optional
+type KeyValuePairOptional struct {
+	Key   string
+	Value *string
+}
+
+func (kvpo *KeyValuePairOptional) ValueString() string {
+	v := ""
+	if kvpo.Value != nil {
+		v = *kvpo.Value
+	}
+	return v
+}
+
 // Command is implemented by every command present in a dockerfile
 type Command interface {
 	Name() string
@@ -346,8 +360,7 @@ func (c *StopSignalCommand) CheckPlatform(platform string) error {
 // Dockerfile author may optionally set a default value of this variable.
 type ArgCommand struct {
 	withNameAndCode
-	Key   string
-	Value *string
+	KeyValuePairOptional
 }
 
 // Expand variables

+ 6 - 10
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go

@@ -580,10 +580,7 @@ func parseArg(req parseRequest) (*ArgCommand, error) {
 		return nil, errExactlyOneArgument("ARG")
 	}
 
-	var (
-		name     string
-		newValue *string
-	)
+	kvpo := KeyValuePairOptional{}
 
 	arg := req.args[0]
 	// 'arg' can just be a name or name-value pair. Note that this is different
@@ -597,16 +594,15 @@ func parseArg(req parseRequest) (*ArgCommand, error) {
 			return nil, errBlankCommandNames("ARG")
 		}
 
-		name = parts[0]
-		newValue = &parts[1]
+		kvpo.Key = parts[0]
+		kvpo.Value = &parts[1]
 	} else {
-		name = arg
+		kvpo.Key = arg
 	}
 
 	return &ArgCommand{
-		Key:             name,
-		Value:           newValue,
-		withNameAndCode: newWithNameAndCode(req),
+		KeyValuePairOptional: kvpo,
+		withNameAndCode:      newWithNameAndCode(req),
 	}, nil
 }
 

+ 8 - 9
vendor/github.com/moby/buildkit/frontend/frontend.go

@@ -5,26 +5,25 @@ import (
 	"io"
 
 	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/executor"
-	"github.com/moby/buildkit/solver"
-	"github.com/moby/buildkit/solver/pb"
+	gatewayclient "github.com/moby/buildkit/frontend/gateway/client"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type Frontend interface {
-	Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (solver.CachedResult, map[string][]byte, error)
+	Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (*Result, error)
 }
 
 type FrontendLLBBridge interface {
-	Solve(ctx context.Context, req SolveRequest) (solver.CachedResult, map[string][]byte, error)
+	Solve(ctx context.Context, req SolveRequest) (*Result, error)
 	ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
 	Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
 }
 
-type SolveRequest struct {
-	Definition      *pb.Definition
-	Frontend        string
-	FrontendOpt     map[string]string
-	ImportCacheRefs []string
+type SolveRequest = gatewayclient.SolveRequest
+
+type WorkerInfos interface {
+	WorkerInfos() []client.WorkerInfo
 }

+ 15 - 4
vendor/github.com/moby/buildkit/frontend/gateway/client/client.go

@@ -8,12 +8,10 @@ import (
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
-// TODO: make this take same options as LLBBridge. Add Return()
 type Client interface {
-	Solve(ctx context.Context, req SolveRequest, exporterAttr map[string][]byte, final bool) (Reference, error)
+	Solve(ctx context.Context, req SolveRequest) (*Result, error)
 	ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
-	Opts() map[string]string
-	SessionID() string
+	BuildOpts() BuildOpts
 }
 
 type Reference interface {
@@ -39,3 +37,16 @@ type SolveRequest struct {
 	FrontendOpt     map[string]string
 	ImportCacheRefs []string
 }
+
+type WorkerInfo struct {
+	ID        string
+	Labels    map[string]string
+	Platforms []specs.Platform
+}
+
+type BuildOpts struct {
+	Opts      map[string]string
+	SessionID string
+	Workers   []WorkerInfo
+	Product   string
+}
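
BuildOpts replaces the separate Opts() and SessionID() calls and also surfaces worker platforms, which is how the dockerfile builder above now picks its default build platform. A minimal sketch inside a frontend:

    func defaultPlatform(c client.Client) specs.Platform {
        bo := c.BuildOpts() // replaces c.Opts() and c.SessionID()
        p := platforms.DefaultSpec()
        if len(bo.Workers) > 0 && len(bo.Workers[0].Platforms) > 0 {
            p = bo.Workers[0].Platforms[0]
        }
        return p
    }
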

+ 54 - 0
vendor/github.com/moby/buildkit/frontend/gateway/client/result.go

@@ -0,0 +1,54 @@
+package client
+
+import (
+	"context"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+type BuildFunc func(context.Context, Client) (*Result, error)
+
+type Result struct {
+	mu       sync.Mutex
+	Ref      Reference
+	Refs     map[string]Reference
+	Metadata map[string][]byte
+}
+
+func NewResult() *Result {
+	return &Result{}
+}
+
+func (r *Result) AddMeta(k string, v []byte) {
+	r.mu.Lock()
+	if r.Metadata == nil {
+		r.Metadata = map[string][]byte{}
+	}
+	r.Metadata[k] = v
+	r.mu.Unlock()
+}
+
+func (r *Result) AddRef(k string, ref Reference) {
+	r.mu.Lock()
+	if r.Refs == nil {
+		r.Refs = map[string]Reference{}
+	}
+	r.Refs[k] = ref
+	r.mu.Unlock()
+}
+
+func (r *Result) SetRef(ref Reference) {
+	r.Ref = ref
+}
+
+func (r *Result) SingleRef() (Reference, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if r.Refs != nil && r.Ref == nil {
+		return nil, errors.Errorf("invalid map result")
+	}
+
+	return r.Ref, nil
+}
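
Frontends now return a Result instead of passing exporter attributes through Solve. A sketch of a BuildFunc using it, mirroring what builder.Build above does with the image config ("containerimage.config" is the exporter key used in this change; the Definition is elided):

    var build client.BuildFunc = func(ctx context.Context, c client.Client) (*client.Result, error) {
        res, err := c.Solve(ctx, client.SolveRequest{ /* Definition: def.ToPB() */ })
        if err != nil {
            return nil, err
        }
        ref, err := res.SingleRef()
        if err != nil {
            return nil, err
        }
        out := client.NewResult()
        out.SetRef(ref)
        out.AddMeta("containerimage.config", []byte(`{}`)) // normally the marshalled image config
        return out, nil
    }
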

+ 149 - 0
vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go

@@ -0,0 +1,149 @@
+package forwarder
+
+import (
+	"context"
+	"sync"
+
+	"github.com/moby/buildkit/cache"
+	clienttypes "github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/frontend"
+	"github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/apicaps"
+	"github.com/moby/buildkit/worker"
+	"github.com/pkg/errors"
+)
+
+func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) {
+	return &bridgeClient{
+		opts:              opts,
+		FrontendLLBBridge: llbBridge,
+		sid:               session.FromContext(ctx),
+		workerInfos:       workerInfos,
+		final:             map[*ref]struct{}{},
+	}, nil
+}
+
+type bridgeClient struct {
+	frontend.FrontendLLBBridge
+	mu           sync.Mutex
+	opts         map[string]string
+	final        map[*ref]struct{}
+	sid          string
+	exporterAttr map[string][]byte
+	refs         []*ref
+	workerInfos  []clienttypes.WorkerInfo
+}
+
+func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) {
+	res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
+		Definition:      req.Definition,
+		Frontend:        req.Frontend,
+		FrontendOpt:     req.FrontendOpt,
+		ImportCacheRefs: req.ImportCacheRefs,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	cRes := &client.Result{}
+	c.mu.Lock()
+	for k, r := range res.Refs {
+		rr := &ref{r}
+		c.refs = append(c.refs, rr)
+		cRes.AddRef(k, rr)
+	}
+	if r := res.Ref; r != nil {
+		rr := &ref{r}
+		c.refs = append(c.refs, rr)
+		cRes.SetRef(rr)
+	}
+	c.mu.Unlock()
+	cRes.Metadata = res.Metadata
+
+	return cRes, nil
+}
+func (c *bridgeClient) BuildOpts() client.BuildOpts {
+	workers := make([]client.WorkerInfo, 0, len(c.workerInfos))
+	for _, w := range c.workerInfos {
+		workers = append(workers, client.WorkerInfo(w))
+	}
+
+	return client.BuildOpts{
+		Opts:      c.opts,
+		SessionID: c.sid,
+		Workers:   workers,
+		Product:   apicaps.ExportedProduct,
+	}
+}
+
+func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) {
+	if r == nil {
+		return nil, nil
+	}
+
+	res := &frontend.Result{}
+
+	if r.Refs != nil {
+		res.Refs = make(map[string]solver.CachedResult, len(r.Refs))
+		for k, r := range r.Refs {
+			rr, ok := r.(*ref)
+			if !ok {
+				return nil, errors.Errorf("invalid reference type for forward %T", r)
+			}
+			c.final[rr] = struct{}{}
+			res.Refs[k] = rr.CachedResult
+		}
+	}
+	if r := r.Ref; r != nil {
+		rr, ok := r.(*ref)
+		if !ok {
+			return nil, errors.Errorf("invalid reference type for forward %T", r)
+		}
+		c.final[rr] = struct{}{}
+		res.Ref = rr.CachedResult
+	}
+	res.Metadata = r.Metadata
+
+	return res, nil
+}
+
+func (c *bridgeClient) discard(err error) {
+	for _, r := range c.refs {
+		if r != nil {
+			if _, ok := c.final[r]; !ok || err != nil {
+				r.Release(context.TODO())
+			}
+		}
+	}
+}
+
+type ref struct {
+	solver.CachedResult
+}
+
+func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
+	ref, err := r.getImmutableRef()
+	if err != nil {
+		return nil, err
+	}
+	newReq := cache.ReadRequest{
+		Filename: req.Filename,
+	}
+	if r := req.Range; r != nil {
+		newReq.Range = &cache.FileRange{
+			Offset: r.Offset,
+			Length: r.Length,
+		}
+	}
+	return cache.ReadFile(ctx, ref, newReq)
+}
+
+func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
+	ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
+	if !ok {
+		return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
+	}
+	return ref.ImmutableRef, nil
+}

+ 38 - 0
vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go

@@ -0,0 +1,38 @@
+package forwarder
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/frontend"
+	"github.com/moby/buildkit/frontend/gateway/client"
+)
+
+func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend {
+	return &GatewayForwarder{
+		workers: w,
+		f:       f,
+	}
+}
+
+type GatewayForwarder struct {
+	workers frontend.WorkerInfos
+	f       client.BuildFunc
+}
+
+func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRes *frontend.Result, retErr error) {
+	c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, gf.workers.WorkerInfos())
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		c.discard(retErr)
+	}()
+
+	res, err := gf.f(ctx, c)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.toFrontendResult(res)
+}
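
With builder.Build now matching client.BuildFunc, the in-process dockerfile frontend becomes this forwarder wrapped around it. A hedged registration sketch (assuming, as in buildkitd, that the worker controller satisfies frontend.WorkerInfos, and using the conventional frontend keys):

    func newFrontends(wc *worker.Controller) map[string]frontend.Frontend {
        return map[string]frontend.Frontend{
            "dockerfile.v0": forwarder.NewGatewayForwarder(wc, builder.Build),
            "gateway.v0":    gateway.NewGatewayFrontend(wc),
        }
    }
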

+ 199 - 52
vendor/github.com/moby/buildkit/frontend/gateway/gateway.go

@@ -12,6 +12,7 @@ import (
 	"time"
 
 	"github.com/docker/distribution/reference"
+	apitypes "github.com/moby/buildkit/api/types"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
@@ -20,15 +21,19 @@ import (
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	opspb "github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/moby/buildkit/worker"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/http2"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/health"
 	"google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
 )
 
 const (
@@ -37,11 +42,14 @@ const (
 	exporterImageConfig = "containerimage.config"
 )
 
-func NewGatewayFrontend() frontend.Frontend {
-	return &gatewayFrontend{}
+func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend {
+	return &gatewayFrontend{
+		workers: w,
+	}
 }
 
 type gatewayFrontend struct {
+	workers frontend.WorkerInfos
 }
 
 func filterPrefix(opts map[string]string, pfx string) map[string]string {
@@ -54,10 +62,10 @@ func filterPrefix(opts map[string]string, pfx string) map[string]string {
 	return m
 }
 
-func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
+func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (ret *frontend.Result, retErr error) {
 	source, ok := opts[keySource]
 	if !ok {
-		return nil, nil, errors.Errorf("no source specified for gateway")
+		return nil, errors.Errorf("no source specified for gateway")
 	}
 
 	sid := session.FromContext(ctx)
@@ -68,46 +76,52 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 	var readonly bool // TODO: try to switch to read-only by default.
 
 	if isDevel {
-		ref, exp, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
+		devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
 			frontend.SolveRequest{
 				Frontend:    source,
 				FrontendOpt: filterPrefix(opts, "gateway-"),
 			})
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
-		defer ref.Release(context.TODO())
-
-		workerRef, ok := ref.Sys().(*worker.WorkerRef)
+		defer func() {
+			devRes.EachRef(func(ref solver.CachedResult) error {
+				return ref.Release(context.TODO())
+			})
+		}()
+		if devRes.Ref == nil {
+			return nil, errors.Errorf("development gateway didn't return default result")
+		}
+		workerRef, ok := devRes.Ref.Sys().(*worker.WorkerRef)
 		if !ok {
-			return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
+			return nil, errors.Errorf("invalid ref: %T", devRes.Ref.Sys())
 		}
 		rootFS = workerRef.ImmutableRef
-		config, ok := exp[exporterImageConfig]
+		config, ok := devRes.Metadata[exporterImageConfig]
 		if ok {
 			if err := json.Unmarshal(config, &img); err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 		}
 	} else {
 		sourceRef, err := reference.ParseNormalizedNamed(source)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), nil) // TODO:
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		if err := json.Unmarshal(config, &img); err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		if dgst != "" {
 			sourceRef, err = reference.WithDigest(sourceRef, dgst)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 		}
 
@@ -115,27 +129,35 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 
 		def, err := src.Marshal()
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
-		ref, _, err := llbBridge.Solve(ctx, frontend.SolveRequest{
+		res, err := llbBridge.Solve(ctx, frontend.SolveRequest{
 			Definition: def.ToPB(),
 		})
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
-		defer ref.Release(context.TODO())
-		workerRef, ok := ref.Sys().(*worker.WorkerRef)
+		defer func() {
+			res.EachRef(func(ref solver.CachedResult) error {
+				return ref.Release(context.TODO())
+			})
+		}()
+		if res.Ref == nil {
+			return nil, errors.Errorf("gateway source didn't return default result")
+
+		}
+		workerRef, ok := res.Ref.Sys().(*worker.WorkerRef)
 		if !ok {
-			return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
+			return nil, errors.Errorf("invalid ref: %T", res.Ref.Sys())
 		}
 		rootFS = workerRef.ImmutableRef
 	}
 
-	lbf, err := newLLBBridgeForwarder(ctx, llbBridge)
+	lbf, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers)
 	defer lbf.conn.Close()
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	args := []string{"/run"}
@@ -158,14 +180,32 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 
 	env = append(env, "BUILDKIT_SESSION_ID="+sid)
 
+	dt, err := json.Marshal(gf.workers.WorkerInfos())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to marshal workers array")
+	}
+	env = append(env, "BUILDKIT_WORKERS="+string(dt))
+
 	defer func() {
 		for _, r := range lbf.refs {
-			if r != nil && (lbf.lastRef != r || retErr != nil) {
-				r.Release(context.TODO())
+			if retErr == nil && lbf.result != nil {
+				keep := false
+				lbf.result.EachRef(func(r2 solver.CachedResult) error {
+					if r == r2 {
+						keep = true
+					}
+					return nil
+				})
+				if keep {
+					continue
+				}
 			}
+			r.Release(context.TODO())
 		}
 	}()
 
+	env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct)
+
 	err = llbBridge.Exec(ctx, executor.Meta{
 		Env:            env,
 		Args:           args,
@@ -173,19 +213,24 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 		ReadonlyRootFS: readonly,
 	}, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
 
+	if lbf.err != nil {
+		return nil, lbf.err
+	}
+
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
-	return lbf.lastRef, lbf.exporterAttr, nil
+	return lbf.result, nil
 }
 
-func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge) (*llbBridgeForwarder, error) {
+func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, error) {
 	lbf := &llbBridgeForwarder{
 		callCtx:   ctx,
 		llbBridge: llbBridge,
-		refs:      map[string]solver.Result{},
+		refs:      map[string]solver.CachedResult{},
 		pipe:      newPipe(),
+		workers:   workers,
 	}
 
 	server := grpc.NewServer()
@@ -251,12 +296,17 @@ func (d dummyAddr) String() string {
 }
 
 type llbBridgeForwarder struct {
-	mu           sync.Mutex
-	callCtx      context.Context
-	llbBridge    frontend.FrontendLLBBridge
-	refs         map[string]solver.Result
-	lastRef      solver.CachedResult
+	mu        sync.Mutex
+	callCtx   context.Context
+	llbBridge frontend.FrontendLLBBridge
+	refs      map[string]solver.CachedResult
+	// lastRef      solver.CachedResult
+	// lastRefs     map[string]solver.CachedResult
+	// err          error
+	result       *frontend.Result
+	err          error
 	exporterAttr map[string][]byte
+	workers      frontend.WorkerInfos
 	*pipe
 }
 
@@ -284,7 +334,7 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R
 
 func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) {
 	ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
-	ref, expResp, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
+	res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
 		Definition:      req.Definition,
 		Frontend:        req.Frontend,
 		FrontendOpt:     req.FrontendOpt,
@@ -294,29 +344,65 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest)
 		return nil, err
 	}
 
-	exp := map[string][]byte{}
-	if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
-		return nil, err
+	if len(res.Refs) > 0 && !req.AllowResultReturn {
+		// this should never happen because old client shouldn't make a map request
+		return nil, errors.Errorf("solve did not return default result")
 	}
 
-	if expResp != nil {
-		for k, v := range expResp {
-			exp[k] = v
-		}
-	}
+	pbRes := &pb.Result{}
+	var defaultID string
 
-	id := identity.NewID()
 	lbf.mu.Lock()
-	lbf.refs[id] = ref
+	if res.Refs != nil {
+		ids := make(map[string]string, len(res.Refs))
+		for k, ref := range res.Refs {
+			id := identity.NewID()
+			if ref == nil {
+				id = ""
+			} else {
+				lbf.refs[id] = ref
+			}
+			ids[k] = id
+		}
+		pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: ids}}
+	} else {
+		id := identity.NewID()
+		if res.Ref == nil {
+			id = ""
+		} else {
+			lbf.refs[id] = res.Ref
+		}
+		defaultID = id
+		pbRes.Result = &pb.Result_Ref{Ref: id}
+	}
 	lbf.mu.Unlock()
+
+	// compatibility mode for older clients
 	if req.Final {
-		lbf.lastRef = ref
-		lbf.exporterAttr = exp
+		exp := map[string][]byte{}
+		if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
+			return nil, err
+		}
+
+		for k, v := range res.Metadata {
+			exp[k] = v
+		}
+
+		lbf.result = &frontend.Result{
+			Ref:      lbf.refs[defaultID],
+			Metadata: exp,
+		}
 	}
-	if ref == nil {
-		id = ""
+
+	resp := &pb.SolveResponse{
+		Result: pbRes,
 	}
-	return &pb.SolveResponse{Ref: id}, nil
+
+	if !req.AllowResultReturn {
+		resp.Ref = defaultID
+	}
+
+	return resp, nil
 }
 func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) {
 	ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
@@ -353,7 +439,68 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq
 }
 
 func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) {
-	return &pb.PongResponse{}, nil
+
+	workers := lbf.workers.WorkerInfos()
+	pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers))
+	for _, w := range workers {
+		pbWorkers = append(pbWorkers, &apitypes.WorkerRecord{
+			ID:        w.ID,
+			Labels:    w.Labels,
+			Platforms: opspb.PlatformsFromSpec(w.Platforms),
+		})
+	}
+
+	return &pb.PongResponse{
+		FrontendAPICaps: pb.Caps.All(),
+		Workers:         pbWorkers,
+		// TODO: add LLB info
+	}, nil
+}
+
+func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) {
+	if in.Error != nil {
+		lbf.err = status.ErrorProto(&spb.Status{
+			Code:    in.Error.Code,
+			Message: in.Error.Message,
+			// Details: in.Error.Details,
+		})
+	} else {
+		lbf.result = &frontend.Result{
+			Metadata: in.Result.Metadata,
+		}
+
+		switch res := in.Result.Result.(type) {
+		case *pb.Result_Ref:
+			ref, err := lbf.convertRef(res.Ref)
+			if err != nil {
+				return nil, err
+			}
+			lbf.result.Ref = ref
+		case *pb.Result_Refs:
+			m := map[string]solver.CachedResult{}
+			for k, v := range res.Refs.Refs {
+				ref, err := lbf.convertRef(v)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = ref
+			}
+			lbf.result.Refs = m
+		}
+	}
+
+	return &pb.ReturnResponse{}, nil
+}
+
+func (lbf *llbBridgeForwarder) convertRef(id string) (solver.CachedResult, error) {
+	if id == "" {
+		return nil, nil
+	}
+	r, ok := lbf.refs[id]
+	if !ok {
+		return nil, errors.Errorf("return reference %s not found", id)
+	}
+	return r, nil
 }
 
 func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {

+ 64 - 0
vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go

@@ -0,0 +1,64 @@
+package moby_buildkit_v1_frontend
+
+import "github.com/moby/buildkit/util/apicaps"
+
+var Caps apicaps.CapList
+
+// Every backwards or forwards non-compatible change needs to add a new capability row.
+// By default new capabilities should be experimental. After merge a capability is
+// considered immutable. After a capability is marked stable it should not be disabled.
+
+const (
+	CapSolveBase         apicaps.CapID = "solve.base"
+	CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn"
+	CapResolveImage      apicaps.CapID = "resolveimage"
+	CapReadFile          apicaps.CapID = "readfile"
+	CapReturnResult      apicaps.CapID = "return"
+	CapReturnMap         apicaps.CapID = "returnmap"
+)
+
+func init() {
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapSolveBase,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:         CapSolveInlineReturn,
+		Name:       "inline return from solve",
+		Enabled:    true,
+		Deprecated: true,
+		Status:     apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapResolveImage,
+		Name:    "resolve remote image config",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapReadFile,
+		Name:    "read static file",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapReturnResult,
+		Name:    "return solve result",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapReturnMap,
+		Name:    "return reference map",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+}

File diff suppressed because it is too large
+ 1248 - 121
vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go


+ 40 - 2
vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto

@@ -3,17 +3,44 @@ syntax = "proto3";
 package moby.buildkit.v1.frontend;
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/gogo/googleapis/google/rpc/status.proto";
 import "github.com/moby/buildkit/solver/pb/ops.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
+import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
 
 option (gogoproto.sizer_all) = true;
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service LLBBridge {
+	// apicaps:CapResolveImage
 	rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
+	// apicaps:CapSolveBase
 	rpc Solve(SolveRequest) returns (SolveResponse);
+	// apicaps:CapReadFile
 	rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
 	rpc Ping(PingRequest) returns (PongResponse);
+	rpc Return(ReturnRequest) returns (ReturnResponse);
+}
+
+message Result {
+	oneof result {
+		string ref = 1;
+		RefMap refs = 2;
+	}
+	map<string, bytes> metadata = 10;
+}
+
+message RefMap {
+	map<string, string> refs = 1;
+}
+
+message ReturnRequest {
+	Result result = 1;
+	google.rpc.Status error = 2;
+}
+
+message ReturnResponse {
 }
 
 message ResolveImageConfigRequest {
@@ -31,13 +58,21 @@ message SolveRequest {
 	string Frontend = 2;
 	map<string, string> FrontendOpt = 3;
 	repeated string ImportCacheRefs = 4;
+	bool allowResultReturn = 5;
+	
+	// apicaps.CapSolveInlineReturn deprecated
 	bool Final = 10;
 	bytes ExporterAttr = 11;
 }
 
 message SolveResponse {
-	string Ref = 1; // can be used by readfile request
-	bytes ExporterAttr = 2;
+	// deprecated
+	string ref = 1; // can be used by readfile request
+	// deprecated
+/*	bytes ExporterAttr = 2;*/
+	
+	// these fields are returned when allowMapReturn was set
+	Result result = 3;
 }
 
 message ReadFileRequest {
@@ -58,4 +93,7 @@ message ReadFileResponse {
 message PingRequest{
 }
 message PongResponse{
+	repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
+	repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
+	repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
 }

+ 23 - 0
vendor/github.com/moby/buildkit/frontend/result.go

@@ -0,0 +1,23 @@
+package frontend
+
+import "github.com/moby/buildkit/solver"
+
+type Result struct {
+	Ref      solver.CachedResult
+	Refs     map[string]solver.CachedResult
+	Metadata map[string][]byte
+}
+
+func (r *Result) EachRef(fn func(solver.CachedResult) error) (err error) {
+	if r.Ref != nil {
+		err = fn(r.Ref)
+	}
+	for _, r := range r.Refs {
+		if r != nil {
+			if err1 := fn(r); err1 != nil && err == nil {
+				err = err1
+			}
+		}
+	}
+	return err
+}
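
EachRef is mainly a cleanup helper; gateway.go above uses the same pattern to release every reference a Result holds once the build is done:

    func releaseAll(res *frontend.Result) {
        res.EachRef(func(r solver.CachedResult) error {
            return r.Release(context.TODO())
        })
    }
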

+ 39 - 22
vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go

@@ -20,16 +20,20 @@ import (
 )
 
 type llbBridge struct {
-	builder       solver.Builder
-	frontends     map[string]frontend.Frontend
-	resolveWorker func() (worker.Worker, error)
-	ci            *remotecache.CacheImporter
-	cms           map[string]solver.CacheManager
-	cmsMu         sync.Mutex
-	platforms     []specs.Platform
+	builder              solver.Builder
+	frontends            map[string]frontend.Frontend
+	resolveWorker        func() (worker.Worker, error)
+	resolveCacheImporter remotecache.ResolveCacheImporterFunc
+	cms                  map[string]solver.CacheManager
+	cmsMu                sync.Mutex
+	platforms            []specs.Platform
 }
 
-func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res solver.CachedResult, exp map[string][]byte, err error) {
+func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {
+	w, err := b.resolveWorker()
+	if err != nil {
+		return nil, err
+	}
 	var cms []solver.CacheManager
 	for _, ref := range req.ImportCacheRefs {
 		b.cmsMu.Lock()
@@ -37,14 +41,22 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res s
 		if prevCm, ok := b.cms[ref]; !ok {
 			r, err := reference.ParseNormalizedNamed(ref)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 			ref = reference.TagNameOnly(r).String()
 			func(ref string) {
 				cm = newLazyCacheManager(ref, func() (solver.CacheManager, error) {
 					var cmNew solver.CacheManager
 					if err := b.builder.Call(ctx, "importing cache manifest from "+ref, func(ctx context.Context) error {
-						cmNew, err = b.ci.Resolve(ctx, ref)
+						if b.resolveCacheImporter == nil {
+							return errors.New("no cache importer is available")
+						}
+						typ := "" // TODO: support non-registry type
+						ci, desc, err := b.resolveCacheImporter(ctx, typ, ref)
+						if err != nil {
+							return err
+						}
+						cmNew, err = ci.Resolve(ctx, desc, ref, w)
 						return err
 					}); err != nil {
 						return nil, err
@@ -63,38 +75,43 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res s
 	if req.Definition != nil && req.Definition.Def != nil {
 		edge, err := Load(req.Definition, WithCacheSources(cms), RuntimePlatforms(b.platforms))
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
-		res, err = b.builder.Build(ctx, edge)
+		ref, err := b.builder.Build(ctx, edge)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
+
+		res = &frontend.Result{Ref: ref}
 	}
 	if req.Frontend != "" {
 		f, ok := b.frontends[req.Frontend]
 		if !ok {
-			return nil, nil, errors.Errorf("invalid frontend: %s", req.Frontend)
+			return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
 		}
-		res, exp, err = f.Solve(ctx, b, req.FrontendOpt)
+		res, err = f.Solve(ctx, b, req.FrontendOpt)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 	} else {
 		if req.Definition == nil || req.Definition.Def == nil {
-			return nil, nil, nil
+			return &frontend.Result{}, nil
 		}
 	}
 
-	if res != nil {
-		wr, ok := res.Sys().(*worker.WorkerRef)
+	if err := res.EachRef(func(r solver.CachedResult) error {
+		wr, ok := r.Sys().(*worker.WorkerRef)
 		if !ok {
-			return nil, nil, errors.Errorf("invalid reference for exporting: %T", res.Sys())
+			return errors.Errorf("invalid reference for exporting: %T", r.Sys())
 		}
 		if wr.ImmutableRef != nil {
-			if err := wr.ImmutableRef.Finalize(ctx); err != nil {
-				return nil, nil, err
+			if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
+				return err
 			}
 		}
+		return nil
+	}); err != nil {
+		return nil, err
 	}
 	return
 }

+ 6 - 2
vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go

@@ -117,12 +117,16 @@ func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []s
 	lm.Unmount()
 	lm = nil
 
-	newref, _, err := b.b.Solve(ctx, frontend.SolveRequest{
+	newRes, err := b.b.Solve(ctx, frontend.SolveRequest{
 		Definition: def.ToPB(),
 	})
 	if err != nil {
 		return nil, err
 	}
 
-	return []solver.Result{newref}, err
+	for _, r := range newRes.Refs {
+		r.Release(context.TODO())
+	}
+
+	return []solver.Result{newRes.Ref}, err
 }

+ 29 - 26
vendor/github.com/moby/buildkit/solver/llbsolver/solver.go

@@ -19,7 +19,7 @@ import (
 
 type ExporterRequest struct {
 	Exporter        exporter.ExporterInstance
-	CacheExporter   *remotecache.RegistryCacheExporter
+	CacheExporter   remotecache.Exporter
 	CacheExportMode solver.CacheExportMode
 }
 
@@ -27,18 +27,18 @@ type ExporterRequest struct {
 type ResolveWorkerFunc func() (worker.Worker, error)
 
 type Solver struct {
-	solver        *solver.Solver
-	resolveWorker ResolveWorkerFunc
-	frontends     map[string]frontend.Frontend
-	ci            *remotecache.CacheImporter
-	platforms     []specs.Platform
+	solver               *solver.Solver
+	resolveWorker        ResolveWorkerFunc
+	frontends            map[string]frontend.Frontend
+	resolveCacheImporter remotecache.ResolveCacheImporterFunc
+	platforms            []specs.Platform
 }
 
-func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, ci *remotecache.CacheImporter) (*Solver, error) {
+func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, resolveCI remotecache.ResolveCacheImporterFunc) (*Solver, error) {
 	s := &Solver{
-		resolveWorker: defaultResolver(wc),
-		frontends:     f,
-		ci:            ci,
+		resolveWorker:        defaultResolver(wc),
+		frontends:            f,
+		resolveCacheImporter: resolveCI,
 	}
 
 	results := newCacheResultStorage(wc)
@@ -71,12 +71,12 @@ func (s *Solver) resolver() solver.ResolveOpFunc {
 
 func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
 	return &llbBridge{
-		builder:       b,
-		frontends:     s.frontends,
-		resolveWorker: s.resolveWorker,
-		ci:            s.ci,
-		cms:           map[string]solver.CacheManager{},
-		platforms:     s.platforms,
+		builder:              b,
+		frontends:            s.frontends,
+		resolveWorker:        s.resolveWorker,
+		resolveCacheImporter: s.resolveCacheImporter,
+		cms:                  map[string]solver.CacheManager{},
+		platforms:            s.platforms,
 	}
 }
 
@@ -90,21 +90,22 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 
 	j.SessionID = session.FromContext(ctx)
 
-	res, exporterOpt, err := s.Bridge(j).Solve(ctx, req)
+	res, err := s.Bridge(j).Solve(ctx, req)
 	if err != nil {
 		return nil, err
 	}
 
 	defer func() {
-		if res != nil {
-			go res.Release(context.TODO())
-		}
+		res.EachRef(func(ref solver.CachedResult) error {
+			go ref.Release(context.TODO())
+			return nil
+		})
 	}()
 
 	var exporterResponse map[string]string
 	if exp := exp.Exporter; exp != nil {
 		var immutable cache.ImmutableRef
-		if res != nil {
+		if res := res.Ref; res != nil { // FIXME(tonistiigi):
 			workerRef, ok := res.Sys().(*worker.WorkerRef)
 			if !ok {
 				return nil, errors.Errorf("invalid reference: %T", res.Sys())
@@ -113,7 +114,7 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 		}
 
 		if err := j.Call(ctx, exp.Name(), func(ctx context.Context) error {
-			exporterResponse, err = exp.Export(ctx, immutable, exporterOpt)
+			exporterResponse, err = exp.Export(ctx, immutable, res.Metadata)
 			return err
 		}); err != nil {
 			return nil, err
@@ -123,14 +124,16 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 	if e := exp.CacheExporter; e != nil {
 		if err := j.Call(ctx, "exporting cache", func(ctx context.Context) error {
 			prepareDone := oneOffProgress(ctx, "preparing build cache for export")
-			if _, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
-				Convert: workerRefConverter,
-				Mode:    exp.CacheExportMode,
+			if err := res.EachRef(func(res solver.CachedResult) error {
+				_, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
+					Convert: workerRefConverter,
+					Mode:    exp.CacheExportMode,
+				})
+				return err
 			}); err != nil {
 				return prepareDone(err)
 			}
 			prepareDone(nil)
-
 			return e.Finalize(ctx)
 		}); err != nil {
 			return nil, err

+ 3 - 2
vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go

@@ -51,8 +51,9 @@ func WithCacheSources(cms []solver.CacheManager) LoadOpt {
 
 func RuntimePlatforms(p []specs.Platform) LoadOpt {
 	var defaultPlatform *pb.Platform
+	pp := make([]specs.Platform, len(p))
 	for i := range p {
-		p[i] = platforms.Normalize(p[i])
+		pp[i] = platforms.Normalize(p[i])
 	}
 	return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
 		if op.Platform == nil {
@@ -67,7 +68,7 @@ func RuntimePlatforms(p []specs.Platform) LoadOpt {
 		}
 		if _, ok := op.Op.(*pb.Op_Exec); ok {
 			var found bool
-			for _, pp := range p {
+			for _, pp := range pp {
 				if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {
 					found = true
 					break

+ 1 - 0
vendor/github.com/moby/buildkit/solver/pb/attr.go

@@ -3,6 +3,7 @@ package pb
 const AttrKeepGitDir = "git.keepgitdir"
 const AttrFullRemoteURL = "git.fullurl"
 const AttrLocalSessionID = "local.session"
+const AttrLocalUniqueID = "local.unique"
 const AttrIncludePatterns = "local.includepattern"
 const AttrFollowPaths = "local.followpaths"
 const AttrExcludePatterns = "local.excludepatterns"

+ 1 - 0
vendor/github.com/moby/buildkit/solver/pb/ops.pb.go

@@ -609,6 +609,7 @@ func (m *SourceOp) GetAttrs() map[string]string {
 }
 
 // BuildOp is used for nested build invocation.
+// BuildOp is experimental and can break without backwards compatibility
 type BuildOp struct {
 	Builder InputIndex             `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"`
 	Inputs  map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`

+ 1 - 0
vendor/github.com/moby/buildkit/solver/pb/ops.proto

@@ -114,6 +114,7 @@ message SourceOp {
 }
 
 // BuildOp is used for nested build invocation.
+// BuildOp is experimental and can break without backwards compatibility
 message BuildOp {
 	int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
 	map<string, BuildInput> inputs = 2;

+ 41 - 0
vendor/github.com/moby/buildkit/solver/pb/platform.go

@@ -0,0 +1,41 @@
+package pb
+
+import (
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func (p *Platform) Spec() specs.Platform {
+	return specs.Platform{
+		OS:           p.OS,
+		Architecture: p.Architecture,
+		Variant:      p.Variant,
+		OSVersion:    p.OSVersion,
+		OSFeatures:   p.OSFeatures,
+	}
+}
+
+func PlatformFromSpec(p specs.Platform) Platform {
+	return Platform{
+		OS:           p.OS,
+		Architecture: p.Architecture,
+		Variant:      p.Variant,
+		OSVersion:    p.OSVersion,
+		OSFeatures:   p.OSFeatures,
+	}
+}
+
+func ToSpecPlatforms(p []Platform) []specs.Platform {
+	out := make([]specs.Platform, 0, len(p))
+	for _, pp := range p {
+		out = append(out, pp.Spec())
+	}
+	return out
+}
+
+func PlatformsFromSpec(p []specs.Platform) []Platform {
+	out := make([]Platform, 0, len(p))
+	for _, pp := range p {
+		out = append(out, PlatformFromSpec(pp))
+	}
+	return out
+}
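
These helpers convert between the OCI specs.Platform type and the wire-level pb.Platform. A short round-trip sketch, using containerd's platforms package for normalization:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/solver/pb"
)

func main() {
	// Normalize the host platform, convert it to the wire type, and back.
	spec := platforms.Normalize(platforms.DefaultSpec())
	wire := pb.PlatformFromSpec(spec)

	back := pb.ToSpecPlatforms([]pb.Platform{wire})
	fmt.Println(platforms.Format(back[0]))
}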

+ 161 - 0
vendor/github.com/moby/buildkit/util/apicaps/caps.go

@@ -0,0 +1,161 @@
+package apicaps
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	pb "github.com/moby/buildkit/util/apicaps/pb"
+	"github.com/pkg/errors"
+)
+
+type PBCap = pb.APICap
+
+// ExportedProduct is the name of the product using this package.
+// Users vendoring this library may override it to provide better versioning hints
+// for their users (or set it with a flag to buildkitd).
+var ExportedProduct string
+
+// CapStatus defines the stability properties of a capability
+type CapStatus int
+
+const (
+	// CapStatusStable refers to a capability that should never be changed in a
+	// backwards-incompatible manner unless there is a serious security issue.
+	CapStatusStable CapStatus = iota
+	// CapStatusExperimental refers to a capability that may be removed in the future.
+	// If incompatible changes are made, the previous ID is disabled and a new one is added.
+	CapStatusExperimental
+	// CapStatusPrerelease is the same as CapStatusExperimental and can be used for new
+	// features before they move to stable.
+	CapStatusPrerelease
+)
+
+// CapID is type for capability identifier
+type CapID string
+
+// Cap describes an API feature
+type Cap struct {
+	ID                  CapID
+	Name                string // readable name, may contain spaces but keep in one sentence
+	Status              CapStatus
+	Enabled             bool
+	Deprecated          bool
+	SupportedHint       map[string]string
+	DisabledReason      string
+	DisabledReasonMsg   string
+	DisabledAlternative string
+}
+
+// CapList is a collection of capability definitions
+type CapList struct {
+	m map[CapID]Cap
+}
+
+// Init initializes definition for a new capability.
+// Not safe to be called concurrently with other methods.
+func (l *CapList) Init(cc ...Cap) {
+	if l.m == nil {
+		l.m = make(map[CapID]Cap, len(cc))
+	}
+	for _, c := range cc {
+		l.m[c.ID] = c
+	}
+}
+
+// All reports the configuration of all known capabilities
+func (l *CapList) All() []pb.APICap {
+	out := make([]pb.APICap, 0, len(l.m))
+	for _, c := range l.m {
+		out = append(out, pb.APICap{
+			ID:                  string(c.ID),
+			Enabled:             c.Enabled,
+			Deprecated:          c.Deprecated,
+			DisabledReason:      c.DisabledReason,
+			DisabledReasonMsg:   c.DisabledReasonMsg,
+			DisabledAlternative: c.DisabledAlternative,
+		})
+	}
+	sort.Slice(out, func(i, j int) bool {
+		return out[i].ID < out[j].ID
+	})
+	return out
+}
+
+// CapSet returns a CapSet for a capability configuration
+func (l *CapList) CapSet(caps []pb.APICap) CapSet {
+	m := make(map[string]*pb.APICap, len(caps))
+	for _, c := range caps {
+		if c.ID != "" {
+			c := c // copy so the stored pointer does not alias the loop variable
+			m[c.ID] = &c
+		}
+	}
+	return CapSet{
+		list: l,
+		set:  m,
+	}
+}
+
+// CapSet is a configuration for detecting supported capabilities
+type CapSet struct {
+	list *CapList
+	set  map[string]*pb.APICap
+}
+
+// Supports returns an error if the capability is not supported
+func (s *CapSet) Supports(id CapID) error {
+	err := &CapError{ID: id}
+	c, ok := s.list.m[id]
+	if !ok {
+		return errors.WithStack(err)
+	}
+	err.Definition = &c
+	state, ok := s.set[string(id)]
+	if !ok {
+		return errors.WithStack(err)
+	}
+	err.State = state
+	if !state.Enabled {
+		return errors.WithStack(err)
+	}
+	return nil
+}
+
+// CapError is an error for unsupported capability
+type CapError struct {
+	ID         CapID
+	Definition *Cap
+	State      *pb.APICap
+}
+
+func (e CapError) Error() string {
+	if e.Definition == nil {
+		return fmt.Sprintf("unknown API capability %s", e.ID)
+	}
+	typ := ""
+	if e.Definition.Status == CapStatusExperimental {
+		typ = "experimental "
+	}
+	if e.Definition.Status == CapStatusPrerelease {
+		typ = "prerelease "
+	}
+	name := ""
+	if e.Definition.Name != "" {
+		name = "(" + e.Definition.Name + ")"
+	}
+	b := &strings.Builder{}
+	fmt.Fprintf(b, "requested %sfeature %s %s", typ, e.ID, name)
+	if e.State == nil {
+		fmt.Fprint(b, " is not supported by build server")
+		if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok {
+			fmt.Fprintf(b, " (added in %s)", hint)
+		}
+		fmt.Fprintf(b, ", please update %s", ExportedProduct)
+	} else {
+		fmt.Fprint(b, " has been disabled on the build server")
+		if e.State.DisabledReasonMsg != "" {
+			fmt.Fprintf(b, ": %s", e.State.DisabledReasonMsg)
+		}
+	}
+	return b.String()
+}
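
The intended flow is: a server Init()s its capabilities, advertises them with All(), and a client builds a CapSet from whatever was reported and calls Supports() before using a feature. A self-contained sketch with a made-up capability ID:

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/apicaps"
)

// capFrobnicate is a hypothetical capability used only for this example.
const capFrobnicate apicaps.CapID = "example.frobnicate"

func main() {
	var caps apicaps.CapList
	caps.Init(apicaps.Cap{
		ID:      capFrobnicate,
		Name:    "frobnicate things",
		Enabled: true,
		Status:  apicaps.CapStatusExperimental,
	})

	// All() is what a server would put on the wire to advertise support.
	advertised := caps.All()

	// A client that received nothing (an old server) gets a descriptive error.
	empty := caps.CapSet(nil)
	fmt.Println(empty.Supports(capFrobnicate))

	// A client that received the advertised list passes the check.
	full := caps.CapSet(advertised)
	fmt.Println(full.Supports(capFrobnicate) == nil)
}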

+ 535 - 0
vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go

@@ -0,0 +1,535 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: caps.proto
+
+/*
+	Package moby_buildkit_v1_apicaps is a generated protocol buffer package.
+
+	It is generated from these files:
+		caps.proto
+
+	It has these top-level messages:
+		APICap
+*/
+package moby_buildkit_v1_apicaps
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// APICap defines a capability supported by the service
+type APICap struct {
+	ID                  string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Enabled             bool   `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"`
+	Deprecated          bool   `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"`
+	DisabledReason      string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"`
+	DisabledReasonMsg   string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"`
+	DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"`
+}
+
+func (m *APICap) Reset()                    { *m = APICap{} }
+func (m *APICap) String() string            { return proto.CompactTextString(m) }
+func (*APICap) ProtoMessage()               {}
+func (*APICap) Descriptor() ([]byte, []int) { return fileDescriptorCaps, []int{0} }
+
+func (m *APICap) GetID() string {
+	if m != nil {
+		return m.ID
+	}
+	return ""
+}
+
+func (m *APICap) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *APICap) GetDeprecated() bool {
+	if m != nil {
+		return m.Deprecated
+	}
+	return false
+}
+
+func (m *APICap) GetDisabledReason() string {
+	if m != nil {
+		return m.DisabledReason
+	}
+	return ""
+}
+
+func (m *APICap) GetDisabledReasonMsg() string {
+	if m != nil {
+		return m.DisabledReasonMsg
+	}
+	return ""
+}
+
+func (m *APICap) GetDisabledAlternative() string {
+	if m != nil {
+		return m.DisabledAlternative
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap")
+}
+func (m *APICap) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APICap) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCaps(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Enabled {
+		dAtA[i] = 0x10
+		i++
+		if m.Enabled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Deprecated {
+		dAtA[i] = 0x18
+		i++
+		if m.Deprecated {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.DisabledReason) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason)))
+		i += copy(dAtA[i:], m.DisabledReason)
+	}
+	if len(m.DisabledReasonMsg) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg)))
+		i += copy(dAtA[i:], m.DisabledReasonMsg)
+	}
+	if len(m.DisabledAlternative) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative)))
+		i += copy(dAtA[i:], m.DisabledAlternative)
+	}
+	return i, nil
+}
+
+func encodeVarintCaps(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *APICap) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovCaps(uint64(l))
+	}
+	if m.Enabled {
+		n += 2
+	}
+	if m.Deprecated {
+		n += 2
+	}
+	l = len(m.DisabledReason)
+	if l > 0 {
+		n += 1 + l + sovCaps(uint64(l))
+	}
+	l = len(m.DisabledReasonMsg)
+	if l > 0 {
+		n += 1 + l + sovCaps(uint64(l))
+	}
+	l = len(m.DisabledAlternative)
+	if l > 0 {
+		n += 1 + l + sovCaps(uint64(l))
+	}
+	return n
+}
+
+func sovCaps(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozCaps(x uint64) (n int) {
+	return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *APICap) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCaps
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APICap: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCaps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Enabled = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Deprecated = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCaps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DisabledReason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCaps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCaps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DisabledAlternative = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCaps(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCaps
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipCaps(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCaps
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCaps
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthCaps
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowCaps
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipCaps(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthCaps = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowCaps   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("caps.proto", fileDescriptorCaps) }
+
+var fileDescriptorCaps = []byte{
+	// 236 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28,
+	0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd,
+	0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9,
+	0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb,
+	0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23,
+	0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3,
+	0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52,
+	0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5,
+	0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 0x96, 0x44, 0x12, 0x11,
+	0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60,
+	0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a,
+	0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b,
+	0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c,
+	0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08,
+	0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00,
+}

+ 19 - 0
vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto

@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.apicaps;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// APICap defines a capability supported by the service
+message APICap {
+	string ID = 1;
+	bool Enabled = 2;
+	bool Deprecated = 3; // Unused. May be used for warnings in the future
+	string DisabledReason = 4; // Reason key for detection code
+	string DisabledReasonMsg = 5; // Message to the user
+	string DisabledAlternative = 6; // Identifier that an updated client could catch.
+}

+ 3 - 0
vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go

@@ -0,0 +1,3 @@
+package moby_buildkit_v1_apicaps
+
+//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto

+ 5 - 12
vendor/github.com/moby/buildkit/util/contentutil/fetcher.go

@@ -5,35 +5,28 @@ import (
 	"io"
 
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/remotes"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-func FromFetcher(f remotes.Fetcher, desc ocispec.Descriptor) content.Provider {
+func FromFetcher(f remotes.Fetcher) content.Provider {
 	return &fetchedProvider{
-		f:    f,
-		desc: desc,
+		f: f,
 	}
 }
 
 type fetchedProvider struct {
-	f    remotes.Fetcher
-	desc ocispec.Descriptor
+	f remotes.Fetcher
 }
 
 func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
-	if desc.Digest != p.desc.Digest {
-		return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest)
-	}
-
-	rc, err := p.f.Fetch(ctx, p.desc)
+	rc, err := p.f.Fetch(ctx, desc)
 	if err != nil {
 		return nil, err
 	}
 
-	return &readerAt{Reader: rc, Closer: rc, size: p.desc.Size}, nil
+	return &readerAt{Reader: rc, Closer: rc, size: desc.Size}, nil
 }
 
 type readerAt struct {

+ 58 - 0
vendor/github.com/moby/buildkit/util/contentutil/pusher.go

@@ -0,0 +1,58 @@
+package contentutil
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/remotes"
+	"github.com/pkg/errors"
+)
+
+func FromPusher(p remotes.Pusher) content.Ingester {
+	return &pushingIngester{
+		p: p,
+	}
+}
+
+type pushingIngester struct {
+	p remotes.Pusher
+}
+
+// Writer implements content.Ingester. desc.MediaType must be set for manifest blobs.
+func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+	if wOpts.Ref == "" {
+		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+	}
+	// pusher requires desc.MediaType to determine the PUT URL, especially for manifest blobs.
+	contentWriter, err := i.p.Push(ctx, wOpts.Desc)
+	if err != nil {
+		return nil, err
+	}
+	return &writer{
+		Writer:           contentWriter,
+		contentWriterRef: wOpts.Ref,
+	}, nil
+}
+
+type writer struct {
+	content.Writer          // returned from pusher.Push
+	contentWriterRef string // ref passed for Writer()
+}
+
+func (w *writer) Status() (content.Status, error) {
+	st, err := w.Writer.Status()
+	if err != nil {
+		return st, err
+	}
+	if w.contentWriterRef != "" {
+		st.Ref = w.contentWriterRef
+	}
+	return st, nil
+}
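
Together with FromFetcher above, FromPusher lets a remotes.Resolver be used through containerd's content.Provider and content.Ingester interfaces, which is what the registry cache importer/exporter builds on. A rough sketch; registry access and credentials are out of scope, and the image reference is only an example:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
	"github.com/moby/buildkit/util/contentutil"
)

func main() {
	ctx := context.Background()
	resolver := docker.NewResolver(docker.ResolverOptions{})

	ref := "docker.io/library/alpine:latest"
	name, desc, err := resolver.Resolve(ctx, ref)
	if err != nil {
		panic(err)
	}

	fetcher, err := resolver.Fetcher(ctx, name)
	if err != nil {
		panic(err)
	}

	// Read the resolved manifest through the generic content.Provider interface.
	ra, err := contentutil.FromFetcher(fetcher).ReaderAt(ctx, desc)
	if err != nil {
		panic(err)
	}
	defer ra.Close()
	fmt.Println("manifest size:", ra.Size())

	// Writes can be funneled back through a content.Ingester in the same way.
	pusher, err := resolver.Pusher(ctx, ref)
	if err != nil {
		panic(err)
	}
	_ = contentutil.FromPusher(pusher)
}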

+ 0 - 1
vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md

@@ -1 +0,0 @@
-Temporary forked from https://github.com/opencontainers/runc/pull/1692

+ 0 - 190
vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go

@@ -1,190 +0,0 @@
-package specconv
-
-import (
-	"os"
-	"sort"
-	"strings"
-
-	"github.com/opencontainers/runc/libcontainer/system"
-	"github.com/opencontainers/runc/libcontainer/user"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// RootlessOpts is an optional spec for ToRootless
-type RootlessOpts struct {
-	// Add sub{u,g}id to spec.Linux.{U,G}IDMappings.
-	// Requires newuidmap(1) and newgidmap(1) with suid bit.
-	// Ignored when running in userns.
-	MapSubUIDGID bool
-}
-
-// Run-time context for ToRootless.
-type RootlessContext struct {
-	EUID     uint32
-	EGID     uint32
-	SubUIDs  []user.SubID
-	SubGIDs  []user.SubID
-	UIDMap   []user.IDMap
-	GIDMap   []user.IDMap
-	InUserNS bool
-}
-
-// ToRootless converts the given spec file into one that should work with
-// rootless containers, by removing incompatible options and adding others that
-// are needed.
-func ToRootless(spec *specs.Spec, opts *RootlessOpts) error {
-	var err error
-	ctx := RootlessContext{}
-	ctx.EUID = uint32(os.Geteuid())
-	ctx.EGID = uint32(os.Getegid())
-	ctx.SubUIDs, err = user.CurrentUserSubUIDs()
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	ctx.SubGIDs, err = user.CurrentGroupSubGIDs()
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	ctx.UIDMap, err = user.CurrentProcessUIDMap()
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	uidMapExists := !os.IsNotExist(err)
-	ctx.GIDMap, err = user.CurrentProcessUIDMap()
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	ctx.InUserNS = uidMapExists && system.UIDMapInUserNS(ctx.UIDMap)
-	return ToRootlessWithContext(ctx, spec, opts)
-}
-
-// ToRootlessWithContext converts the spec with the run-time context.
-// ctx can be internally modified for sorting.
-func ToRootlessWithContext(ctx RootlessContext, spec *specs.Spec, opts *RootlessOpts) error {
-	if opts == nil {
-		opts = &RootlessOpts{}
-	}
-	var namespaces []specs.LinuxNamespace
-
-	// Remove networkns from the spec.
-	for _, ns := range spec.Linux.Namespaces {
-		switch ns.Type {
-		case specs.NetworkNamespace, specs.UserNamespace:
-			// Do nothing.
-		default:
-			namespaces = append(namespaces, ns)
-		}
-	}
-	// Add userns to the spec.
-	namespaces = append(namespaces, specs.LinuxNamespace{
-		Type: specs.UserNamespace,
-	})
-	spec.Linux.Namespaces = namespaces
-
-	// Add mappings for the current user.
-	if ctx.InUserNS {
-		uNextContainerID := int64(0)
-		sort.Sort(idmapSorter(ctx.UIDMap))
-		for _, uidmap := range ctx.UIDMap {
-			spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
-				specs.LinuxIDMapping{
-					HostID:      uint32(uidmap.ID),
-					ContainerID: uint32(uNextContainerID),
-					Size:        uint32(uidmap.Count),
-				})
-			uNextContainerID += uidmap.Count
-		}
-		gNextContainerID := int64(0)
-		sort.Sort(idmapSorter(ctx.GIDMap))
-		for _, gidmap := range ctx.GIDMap {
-			spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
-				specs.LinuxIDMapping{
-					HostID:      uint32(gidmap.ID),
-					ContainerID: uint32(gNextContainerID),
-					Size:        uint32(gidmap.Count),
-				})
-			gNextContainerID += gidmap.Count
-		}
-		// opts.MapSubUIDGID is ignored in userns
-	} else {
-		spec.Linux.UIDMappings = []specs.LinuxIDMapping{{
-			HostID:      ctx.EUID,
-			ContainerID: 0,
-			Size:        1,
-		}}
-		spec.Linux.GIDMappings = []specs.LinuxIDMapping{{
-			HostID:      ctx.EGID,
-			ContainerID: 0,
-			Size:        1,
-		}}
-		if opts.MapSubUIDGID {
-			uNextContainerID := int64(1)
-			sort.Sort(subIDSorter(ctx.SubUIDs))
-			for _, subuid := range ctx.SubUIDs {
-				spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
-					specs.LinuxIDMapping{
-						HostID:      uint32(subuid.SubID),
-						ContainerID: uint32(uNextContainerID),
-						Size:        uint32(subuid.Count),
-					})
-				uNextContainerID += subuid.Count
-			}
-			gNextContainerID := int64(1)
-			sort.Sort(subIDSorter(ctx.SubGIDs))
-			for _, subgid := range ctx.SubGIDs {
-				spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
-					specs.LinuxIDMapping{
-						HostID:      uint32(subgid.SubID),
-						ContainerID: uint32(gNextContainerID),
-						Size:        uint32(subgid.Count),
-					})
-				gNextContainerID += subgid.Count
-			}
-		}
-	}
-
-	// Fix up mounts.
-	var mounts []specs.Mount
-	for _, mount := range spec.Mounts {
-		// Ignore all mounts that are under /sys.
-		if strings.HasPrefix(mount.Destination, "/sys") {
-			continue
-		}
-
-		// Remove all gid= and uid= mappings.
-		var options []string
-		for _, option := range mount.Options {
-			if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") {
-				options = append(options, option)
-			}
-		}
-
-		mount.Options = options
-		mounts = append(mounts, mount)
-	}
-	// Add the sysfs mount as an rbind.
-	mounts = append(mounts, specs.Mount{
-		Source:      "/sys",
-		Destination: "/sys",
-		Type:        "none",
-		Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
-	})
-	spec.Mounts = mounts
-
-	// Remove cgroup settings.
-	spec.Linux.Resources = nil
-	return nil
-}
-
-// subIDSorter is required for Go <= 1.7
-type subIDSorter []user.SubID
-
-func (x subIDSorter) Len() int           { return len(x) }
-func (x subIDSorter) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x subIDSorter) Less(i, j int) bool { return x[i].SubID < x[j].SubID }
-
-type idmapSorter []user.IDMap
-
-func (x idmapSorter) Len() int           { return len(x) }
-func (x idmapSorter) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x idmapSorter) Less(i, j int) bool { return x[i].ID < x[j].ID }

+ 0 - 184
vendor/github.com/moby/buildkit/util/push/push.go

@@ -1,184 +0,0 @@
-package push
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/remotes"
-	"github.com/containerd/containerd/remotes/docker"
-	"github.com/docker/distribution/reference"
-	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/session/auth"
-	"github.com/moby/buildkit/util/imageutil"
-	"github.com/moby/buildkit/util/progress"
-	"github.com/moby/buildkit/util/tracing"
-	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
-)
-
-func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
-	id := session.FromContext(ctx)
-	if id == "" {
-		return nil
-	}
-	return func(host string) (string, string, error) {
-		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-		defer cancel()
-
-		caller, err := sm.Get(timeoutCtx, id)
-		if err != nil {
-			return "", "", err
-		}
-
-		return auth.CredentialsFunc(context.TODO(), caller)(host)
-	}
-}
-
-func Push(ctx context.Context, sm *session.Manager, cs content.Provider, dgst digest.Digest, ref string, insecure bool) error {
-	desc := ocispec.Descriptor{
-		Digest: dgst,
-	}
-	parsed, err := reference.ParseNormalizedNamed(ref)
-	if err != nil {
-		return err
-	}
-	ref = reference.TagNameOnly(parsed).String()
-
-	resolver := docker.NewResolver(docker.ResolverOptions{
-		Client:      tracing.DefaultClient,
-		Credentials: getCredentialsFunc(ctx, sm),
-		PlainHTTP:   insecure,
-	})
-
-	pusher, err := resolver.Pusher(ctx, ref)
-	if err != nil {
-		return err
-	}
-
-	var m sync.Mutex
-	manifestStack := []ocispec.Descriptor{}
-
-	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-		switch desc.MediaType {
-		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
-			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
-			m.Lock()
-			manifestStack = append(manifestStack, desc)
-			m.Unlock()
-			return nil, images.ErrStopHandler
-		default:
-			return nil, nil
-		}
-	})
-
-	pushHandler := remotes.PushHandler(pusher, cs)
-
-	handlers := append([]images.Handler{},
-		childrenHandler(cs),
-		filterHandler,
-		pushHandler,
-	)
-
-	ra, err := cs.ReaderAt(ctx, desc)
-	if err != nil {
-		return err
-	}
-
-	mtype, err := imageutil.DetectManifestMediaType(ra)
-	if err != nil {
-		return err
-	}
-
-	layersDone := oneOffProgress(ctx, "pushing layers")
-	err = images.Dispatch(ctx, images.Handlers(handlers...), ocispec.Descriptor{
-		Digest:    dgst,
-		Size:      ra.Size(),
-		MediaType: mtype,
-	})
-	layersDone(err)
-	if err != nil {
-		return err
-	}
-
-	mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref))
-	for i := len(manifestStack) - 1; i >= 0; i-- {
-		_, err := pushHandler(ctx, manifestStack[i])
-		if err != nil {
-			mfstDone(err)
-			return err
-		}
-	}
-	mfstDone(nil)
-	return nil
-}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.FromContext(ctx)
-	now := time.Now()
-	st := progress.Status{
-		Started: &now,
-	}
-	pw.Write(id, st)
-	return func(err error) error {
-		// TODO: set error on status
-		now := time.Now()
-		st.Completed = &now
-		pw.Write(id, st)
-		pw.Close()
-		return err
-	}
-}
-
-func childrenHandler(provider content.Provider) images.HandlerFunc {
-	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-		var descs []ocispec.Descriptor
-		switch desc.MediaType {
-		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
-			p, err := content.ReadBlob(ctx, provider, desc)
-			if err != nil {
-				return nil, err
-			}
-
-			// TODO(stevvooe): We just assume oci manifest, for now. There may be
-			// subtle differences from the docker version.
-			var manifest ocispec.Manifest
-			if err := json.Unmarshal(p, &manifest); err != nil {
-				return nil, err
-			}
-
-			descs = append(descs, manifest.Config)
-			descs = append(descs, manifest.Layers...)
-		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
-			p, err := content.ReadBlob(ctx, provider, desc)
-			if err != nil {
-				return nil, err
-			}
-
-			var index ocispec.Index
-			if err := json.Unmarshal(p, &index); err != nil {
-				return nil, err
-			}
-
-			for _, m := range index.Manifests {
-				if m.Digest != "" {
-					descs = append(descs, m)
-				}
-			}
-		case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
-			images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
-			ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip:
-			// childless data types.
-			return nil, nil
-		default:
-			logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
-		}
-
-		return descs, nil
-	}
-}

+ 113 - 0
vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go

@@ -0,0 +1,113 @@
+package specconv
+
+import (
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/opencontainers/runc/libcontainer/system"
+	"github.com/opencontainers/runc/libcontainer/user"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// ToRootless converts spec to be compatible with "rootless" runc.
+// * Adds userns (Note: since we are already in userns, ideally we should not need to do this. runc-side issue is tracked at https://github.com/opencontainers/runc/issues/1837)
+// * Fix up mount flags (same as above)
+// * Replace /sys with bind-mount (FIXME: we don't need to do this if netns is unshared)
+func ToRootless(spec *specs.Spec) error {
+	if !system.RunningInUserNS() {
+		return errors.New("needs to be in user namespace")
+	}
+	uidMap, err := user.CurrentProcessUIDMap()
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	gidMap, err := user.CurrentProcessGIDMap()
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return toRootless(spec, uidMap, gidMap)
+}
+
+// toRootless was forked from github.com/opencontainers/runc/libcontainer/specconv
+func toRootless(spec *specs.Spec, uidMap, gidMap []user.IDMap) error {
+	if err := configureUserNS(spec, uidMap, gidMap); err != nil {
+		return err
+	}
+	if err := configureMounts(spec); err != nil {
+		return err
+	}
+
+	// Remove cgroup settings.
+	spec.Linux.Resources = nil
+	spec.Linux.CgroupsPath = ""
+	return nil
+}
+
+// configureUserNS adds userns and the current ID map to the spec.
+// Since we are already in userns, ideally we should not need to add userns.
+// However, currently rootless runc always requires userns to be added.
+// https://github.com/opencontainers/runc/issues/1837
+func configureUserNS(spec *specs.Spec, uidMap, gidMap []user.IDMap) error {
+	spec.Linux.Namespaces = append(spec.Linux.Namespaces, specs.LinuxNamespace{
+		Type: specs.UserNamespace,
+	})
+
+	sort.Slice(uidMap, func(i, j int) bool { return uidMap[i].ID < uidMap[j].ID })
+	uNextContainerID := int64(0)
+	for _, u := range uidMap {
+		spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
+			specs.LinuxIDMapping{
+				HostID:      uint32(u.ID),
+				ContainerID: uint32(uNextContainerID),
+				Size:        uint32(u.Count),
+			})
+		uNextContainerID += int64(u.Count)
+	}
+	sort.Slice(gidMap, func(i, j int) bool { return gidMap[i].ID < gidMap[j].ID })
+	gNextContainerID := int64(0)
+	for _, g := range gidMap {
+		spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
+			specs.LinuxIDMapping{
+				HostID:      uint32(g.ID),
+				ContainerID: uint32(gNextContainerID),
+				Size:        uint32(g.Count),
+			})
+		gNextContainerID += int64(g.Count)
+	}
+	return nil
+}
+
+func configureMounts(spec *specs.Spec) error {
+	var mounts []specs.Mount
+	for _, mount := range spec.Mounts {
+		// Ignore all mounts that are under /sys, because we add /sys later.
+		if strings.HasPrefix(mount.Destination, "/sys") {
+			continue
+		}
+
+		// Remove all gid= and uid= mappings.
+		// Since we are already in userns, ideally we should not need to do this.
+		// https://github.com/opencontainers/runc/issues/1837
+		var options []string
+		for _, option := range mount.Options {
+			if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") {
+				options = append(options, option)
+			}
+		}
+		mount.Options = options
+		mounts = append(mounts, mount)
+	}
+
+	// Add the sysfs mount as an rbind, because we can't mount /sys unless we have netns.
+	// TODO: keep original /sys mount when we have netns.
+	mounts = append(mounts, specs.Mount{
+		Source:      "/sys",
+		Destination: "/sys",
+		Type:        "none",
+		Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
+	})
+	spec.Mounts = mounts
+	return nil
+}
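
A typical caller loads an OCI runtime spec, rewrites it with ToRootless, and only then hands it to rootless runc. A minimal sketch, assuming a config.json such as the one produced by "runc spec" (Linux only, since this file is behind a _linux build constraint):

package main

import (
	"encoding/json"
	"os"

	"github.com/moby/buildkit/util/rootless/specconv"
	"github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	f, err := os.Open("config.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var spec specs.Spec
	if err := json.NewDecoder(f).Decode(&spec); err != nil {
		panic(err)
	}

	// Fails unless the process is already inside a user namespace.
	if err := specconv.ToRootless(&spec); err != nil {
		panic(err)
	}

	out, _ := json.MarshalIndent(&spec, "", "  ")
	os.Stdout.Write(out)
}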

+ 7 - 5
vendor/github.com/moby/buildkit/vendor.conf

@@ -6,8 +6,8 @@ github.com/davecgh/go-spew v1.1.0
 github.com/pmezard/go-difflib v1.0.0
 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
 
-github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667
-github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
+github.com/containerd/containerd b41633746ed4833f52c3c071e8edcfa2713e5677
+github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
 github.com/sirupsen/logrus v1.0.0
 google.golang.org/grpc v1.12.0
@@ -23,7 +23,7 @@ github.com/Microsoft/go-winio v0.4.7
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/opencontainers/runtime-spec v1.0.1
 github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
-github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
+github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@@ -46,7 +46,7 @@ github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
 
-github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
+github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
 github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/docker/libnetwork 822e5b59d346b7ad0735df2c8e445e9787320e67
@@ -60,8 +60,10 @@ github.com/uber/jaeger-lib c48167d9cae5887393dd5e61efd06a4a48b7fbb3
 github.com/codahale/hdrhistogram f8ad88b59a584afeee9d334eff879b104439117b
 
 github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
-github.com/opencontainers/selinux 74a747aeaf2d66097b6908f572794f49f07dda2c
 
 # used by dockerfile tests
 gotest.tools v2.1.0
 github.com/google/go-cmp v0.2.0
+
+# used by rootless spec conv test
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0

+ 17 - 0
vendor/github.com/moby/buildkit/worker/workercontroller.go

@@ -4,6 +4,7 @@ import (
 	"sync"
 
 	"github.com/containerd/containerd/filters"
+	"github.com/moby/buildkit/client"
 	"github.com/pkg/errors"
 )
 
@@ -58,3 +59,19 @@ func (c *Controller) Get(id string) (Worker, error) {
 }
 
 // TODO: add Get(Constraint) (*Worker, error)
+
+func (c *Controller) WorkerInfos() []client.WorkerInfo {
+	workers, err := c.List()
+	if err != nil {
+		return nil
+	}
+	out := make([]client.WorkerInfo, 0, len(workers))
+	for _, w := range workers {
+		out = append(out, client.WorkerInfo{
+			ID:        w.ID(),
+			Labels:    w.Labels(),
+			Platforms: w.Platforms(),
+		})
+	}
+	return out
+}
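
WorkerInfos flattens the registered workers into the client-facing shape reported by the control API and the gateway PongResponse. A small sketch that prints that view; a freshly constructed controller has no workers yet, so it prints nothing until workers are registered at daemon startup:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/worker"
)

// printWorkers dumps the ID, labels and supported platforms of every worker
// known to the controller.
func printWorkers(c *worker.Controller) {
	for _, info := range c.WorkerInfos() {
		fmt.Printf("worker %s labels=%v\n", info.ID, info.Labels)
		for _, p := range info.Platforms {
			fmt.Printf("  platform %s\n", platforms.Format(p))
		}
	}
}

func main() {
	printWorkers(&worker.Controller{})
}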

Some files were not shown because too many files changed in this diff