Merge pull request #37582 from andrewhsu/bk

vndr containerd to a88b631, buildkit to e57eed4, and fsutil to b19464c
Commit a3e78ca0c6 by Tibor Vass, 2018-08-08 10:55:19 -07:00 (committed via GitHub)
65 changed files with 3825 additions and 692 deletions


@ -27,6 +27,7 @@ import (
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/cache"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/source"
@ -113,7 +114,7 @@ func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
return img.RawJSON(), nil
}
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
if preferLocal {
dt, err := is.resolveLocal(ref)
if err == nil {
@ -126,7 +127,7 @@ func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, platf
dt []byte
}
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, platform)
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, opt.Platform)
if err != nil {
return nil, err
}
@ -257,7 +258,7 @@ func (p *puller) resolve(ctx context.Context) error {
return
}
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), &p.platform)
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform})
if err != nil {
p.resolveErr = err
resolveProgressDone(err)


@ -2,6 +2,7 @@ package containerimage
import (
"context"
"encoding/json"
"fmt"
"strings"
@ -9,15 +10,15 @@ import (
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
keyImageName = "name"
exporterImageConfig = "containerimage.config"
keyImageName = "name"
)
// Differ can make a moby layer from a snapshot
@ -54,8 +55,11 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
}
i.targetNames = append(i.targetNames, ref)
}
case exporterImageConfig:
i.config = []byte(v)
case exptypes.ExporterImageConfigKey:
if i.meta == nil {
i.meta = make(map[string][]byte)
}
i.meta[k] = []byte(v)
default:
logrus.Warnf("image exporter: unknown option %s", k)
}
@ -66,18 +70,47 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type imageExporterInstance struct {
*imageExporter
targetNames []distref.Named
config []byte
meta map[string][]byte
}
func (e *imageExporterInstance) Name() string {
return "exporting to image"
}
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) (map[string]string, error) {
if config, ok := opt[exporterImageConfig]; ok {
e.config = config
func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) {
if len(inp.Refs) > 1 {
return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
}
ref := inp.Ref
if ref != nil && len(inp.Refs) == 1 {
return nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
}
// only one loop
for _, v := range inp.Refs {
ref = v
}
var config []byte
switch len(inp.Refs) {
case 0:
config = inp.Metadata[exptypes.ExporterImageConfigKey]
case 1:
platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
if !ok {
return nil, fmt.Errorf("cannot export image, missing platforms mapping")
}
var p exptypes.Platforms
if err := json.Unmarshal(platformsBytes, &p); err != nil {
return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
}
if len(p.Platforms) != len(inp.Refs) {
return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
}
config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
}
config := e.config
var diffs []digest.Digest
if ref != nil {


@ -24,6 +24,7 @@ import (
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
@ -141,7 +142,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solve
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, w.CacheManager, w.MetadataStore, w.Executor, w)
return ops.NewExecOp(v, op, w.CacheManager, w.Opt.SessionManager, w.MetadataStore, w.Executor, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
@ -150,13 +151,13 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solve
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
// ImageSource is typically source/containerimage
resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
if !ok {
return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
}
return resolveImageConfig.ResolveImageConfig(ctx, ref, platform)
return resolveImageConfig.ResolveImageConfig(ctx, ref, opt)
}
// Exec executes a process directly on a worker
@ -175,8 +176,8 @@ func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*cl
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo) error {
return w.CacheManager.Prune(ctx, ch)
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info client.PruneInfo) error {
return w.CacheManager.Prune(ctx, ch, info)
}
// Exporter returns exporter by name
@ -327,5 +328,5 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
}
type resolveImageConfig interface {
ResolveImageConfig(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
}
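Illustrative sketch (not part of the diff): how a caller uses the updated ResolveImageConfig signature. The resolveImageConfig interface and the Platform/ResolveMode fields of gw.ResolveImageConfigOpt come from the hunks above; the imageOS helper and the trimmed config struct are assumptions made for this example.

package example

import (
	"context"
	"encoding/json"

	gw "github.com/moby/buildkit/frontend/gateway/client"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// resolveImageConfig mirrors the interface updated above: platform selection now
// travels inside gw.ResolveImageConfigOpt instead of a bare *ocispec.Platform.
type resolveImageConfig interface {
	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
}

// imageOS resolves the config of ref for the given platform and resolve mode and
// returns the image's OS field.
func imageOS(ctx context.Context, r resolveImageConfig, ref, resolveMode string, p *ocispec.Platform) (string, error) {
	_, dt, err := r.ResolveImageConfig(ctx, ref, gw.ResolveImageConfigOpt{
		Platform:    p,
		ResolveMode: resolveMode,
	})
	if err != nil {
		return "", err
	}
	var cfg struct {
		OS string `json:"os"` // only the field this example needs
	}
	if err := json.Unmarshal(dt, &cfg); err != nil {
		return "", err
	}
	return cfg.OS, nil
}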


@ -26,8 +26,8 @@ github.com/imdario/mergo v0.3.6
golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
# buildkit
github.com/moby/buildkit 98f1604134f945d48538ffca0e18662337b4a850
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
github.com/moby/buildkit e57eed420c7573ae44875be98fa877175b4677a1
github.com/tonistiigi/fsutil b19464cd1b6a00773b4f2eb7acf9c30426f9df42
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716


@ -57,6 +57,8 @@ var _ = time.Kitchen
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type PruneRequest struct {
Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"`
All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
}
func (m *PruneRequest) Reset() { *m = PruneRequest{} }
@ -64,8 +66,22 @@ func (m *PruneRequest) String() string { return proto.CompactTextStri
func (*PruneRequest) ProtoMessage() {}
func (*PruneRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} }
func (m *PruneRequest) GetFilter() []string {
if m != nil {
return m.Filter
}
return nil
}
func (m *PruneRequest) GetAll() bool {
if m != nil {
return m.All
}
return false
}
type DiskUsageRequest struct {
Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"`
}
func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} }
@ -73,11 +89,11 @@ func (m *DiskUsageRequest) String() string { return proto.CompactText
func (*DiskUsageRequest) ProtoMessage() {}
func (*DiskUsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} }
func (m *DiskUsageRequest) GetFilter() string {
func (m *DiskUsageRequest) GetFilter() []string {
if m != nil {
return m.Filter
}
return ""
return nil
}
type DiskUsageResponse struct {
@ -106,6 +122,8 @@ type UsageRecord struct {
LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,stdtime" json:"LastUsedAt,omitempty"`
UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"`
Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"`
RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"`
Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"`
}
func (m *UsageRecord) Reset() { *m = UsageRecord{} }
@ -176,6 +194,20 @@ func (m *UsageRecord) GetDescription() string {
return ""
}
func (m *UsageRecord) GetRecordType() string {
if m != nil {
return m.RecordType
}
return ""
}
func (m *UsageRecord) GetShared() bool {
if m != nil {
return m.Shared
}
return false
}
type SolveRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition" json:"Definition,omitempty"`
@ -898,6 +930,31 @@ func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.Filter) > 0 {
for _, s := range m.Filter {
dAtA[i] = 0xa
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.All {
dAtA[i] = 0x10
i++
if m.All {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
@ -917,10 +974,19 @@ func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) {
var l int
_ = l
if len(m.Filter) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintControl(dAtA, i, uint64(len(m.Filter)))
i += copy(dAtA[i:], m.Filter)
for _, s := range m.Filter {
dAtA[i] = 0xa
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
@ -1036,6 +1102,22 @@ func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintControl(dAtA, i, uint64(len(m.Description)))
i += copy(dAtA[i:], m.Description)
}
if len(m.RecordType) > 0 {
dAtA[i] = 0x52
i++
i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType)))
i += copy(dAtA[i:], m.RecordType)
}
if m.Shared {
dAtA[i] = 0x58
i++
if m.Shared {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
@ -1599,15 +1681,26 @@ func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
func (m *PruneRequest) Size() (n int) {
var l int
_ = l
if len(m.Filter) > 0 {
for _, s := range m.Filter {
l = len(s)
n += 1 + l + sovControl(uint64(l))
}
}
if m.All {
n += 2
}
return n
}
func (m *DiskUsageRequest) Size() (n int) {
var l int
_ = l
l = len(m.Filter)
if l > 0 {
n += 1 + l + sovControl(uint64(l))
if len(m.Filter) > 0 {
for _, s := range m.Filter {
l = len(s)
n += 1 + l + sovControl(uint64(l))
}
}
return n
}
@ -1657,6 +1750,13 @@ func (m *UsageRecord) Size() (n int) {
if l > 0 {
n += 1 + l + sovControl(uint64(l))
}
l = len(m.RecordType)
if l > 0 {
n += 1 + l + sovControl(uint64(l))
}
if m.Shared {
n += 2
}
return n
}
@ -1940,6 +2040,55 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.All = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@ -2017,7 +2166,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Filter = string(dAtA[iNdEx:postIndex])
m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@ -2378,6 +2527,55 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RecordType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 11:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Shared = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@ -4511,79 +4709,81 @@ var (
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
var fileDescriptorControl = []byte{
// 1176 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x23, 0x45,
0x13, 0x7e, 0xc7, 0x76, 0xfc, 0x51, 0x76, 0xa2, 0xbc, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00,
0xc9, 0x5a, 0xed, 0xce, 0x64, 0x03, 0x2b, 0xa1, 0x08, 0xad, 0x76, 0x1d, 0x2f, 0x22, 0x51, 0x22,
0x96, 0xce, 0x86, 0x95, 0xb8, 0x8d, 0xed, 0x8e, 0x77, 0x14, 0x7b, 0x7a, 0xe8, 0xee, 0x09, 0x6b,
0x7e, 0x05, 0x07, 0xfe, 0x09, 0x07, 0xce, 0x1c, 0x90, 0xf6, 0xc8, 0x99, 0x43, 0x16, 0xe5, 0x0e,
0xbf, 0x01, 0xf5, 0xc7, 0xd8, 0xed, 0xd8, 0xf9, 0xdc, 0x53, 0xba, 0x2a, 0x4f, 0x3d, 0x53, 0x5d,
0x4f, 0xb9, 0xab, 0x60, 0xb9, 0x47, 0x13, 0xc1, 0xe8, 0x30, 0x48, 0x19, 0x15, 0x14, 0xad, 0x8e,
0x68, 0x77, 0x1c, 0x74, 0xb3, 0x78, 0xd8, 0x3f, 0x8e, 0x45, 0x70, 0xf2, 0xc0, 0xbb, 0x3f, 0x88,
0xc5, 0xcb, 0xac, 0x1b, 0xf4, 0xe8, 0x28, 0x1c, 0xd0, 0x01, 0x0d, 0x15, 0xb0, 0x9b, 0x1d, 0x29,
0x4b, 0x19, 0xea, 0xa4, 0x09, 0xbc, 0xf5, 0x01, 0xa5, 0x83, 0x21, 0x99, 0xa2, 0x44, 0x3c, 0x22,
0x5c, 0x44, 0xa3, 0xd4, 0x00, 0xee, 0x59, 0x7c, 0xf2, 0x63, 0x61, 0xfe, 0xb1, 0x90, 0xd3, 0xe1,
0x09, 0x61, 0x61, 0xda, 0x0d, 0x69, 0xca, 0x0d, 0x3a, 0xbc, 0x10, 0x1d, 0xa5, 0x71, 0x28, 0xc6,
0x29, 0xe1, 0xe1, 0x8f, 0x94, 0x1d, 0x13, 0xa6, 0x03, 0xfc, 0x15, 0x68, 0x3c, 0x63, 0x59, 0x42,
0x30, 0xf9, 0x21, 0x23, 0x5c, 0xf8, 0x77, 0x61, 0xb5, 0x13, 0xf3, 0xe3, 0x43, 0x1e, 0x0d, 0x72,
0x1f, 0xba, 0x03, 0xe5, 0xa3, 0x78, 0x28, 0x08, 0x73, 0x9d, 0xa6, 0xd3, 0xaa, 0x61, 0x63, 0xf9,
0xbb, 0xf0, 0x7f, 0x0b, 0xcb, 0x53, 0x9a, 0x70, 0x82, 0x1e, 0x42, 0x99, 0x91, 0x1e, 0x65, 0x7d,
0xd7, 0x69, 0x16, 0x5b, 0xf5, 0xcd, 0x0f, 0x83, 0xf3, 0x25, 0x0a, 0x4c, 0x80, 0x04, 0x61, 0x03,
0xf6, 0x7f, 0x2f, 0x40, 0xdd, 0xf2, 0xa3, 0x15, 0x28, 0xec, 0x74, 0xcc, 0xf7, 0x0a, 0x3b, 0x1d,
0xe4, 0x42, 0x65, 0x3f, 0x13, 0x51, 0x77, 0x48, 0xdc, 0x42, 0xd3, 0x69, 0x55, 0x71, 0x6e, 0xa2,
0x77, 0x61, 0x69, 0x27, 0x39, 0xe4, 0xc4, 0x2d, 0x2a, 0xbf, 0x36, 0x10, 0x82, 0xd2, 0x41, 0xfc,
0x13, 0x71, 0x4b, 0x4d, 0xa7, 0x55, 0xc4, 0xea, 0x2c, 0xef, 0xf1, 0x2c, 0x62, 0x24, 0x11, 0xee,
0x92, 0xbe, 0x87, 0xb6, 0x50, 0x1b, 0x6a, 0xdb, 0x8c, 0x44, 0x82, 0xf4, 0x9f, 0x08, 0xb7, 0xdc,
0x74, 0x5a, 0xf5, 0x4d, 0x2f, 0xd0, 0xba, 0x04, 0xb9, 0x2e, 0xc1, 0xf3, 0x5c, 0x97, 0x76, 0xf5,
0xf5, 0xe9, 0xfa, 0xff, 0x7e, 0x7e, 0xb3, 0xee, 0xe0, 0x69, 0x18, 0x7a, 0x0c, 0xb0, 0x17, 0x71,
0x71, 0xc8, 0x15, 0x49, 0xe5, 0x4a, 0x92, 0x92, 0x22, 0xb0, 0x62, 0xd0, 0x1a, 0x80, 0x2a, 0xc0,
0x36, 0xcd, 0x12, 0xe1, 0x56, 0x55, 0xde, 0x96, 0x07, 0x35, 0xa1, 0xde, 0x21, 0xbc, 0xc7, 0xe2,
0x54, 0xc4, 0x34, 0x71, 0x6b, 0xea, 0x0a, 0xb6, 0xcb, 0xff, 0xa5, 0x04, 0x8d, 0x03, 0xd9, 0x14,
0xb9, 0x70, 0xab, 0x50, 0xc4, 0xe4, 0xc8, 0x54, 0x51, 0x1e, 0x51, 0x00, 0xd0, 0x21, 0x47, 0x71,
0x12, 0x2b, 0x8e, 0x82, 0x4a, 0x73, 0x25, 0x48, 0xbb, 0xc1, 0xd4, 0x8b, 0x2d, 0x04, 0xf2, 0xa0,
0xfa, 0xf4, 0x55, 0x4a, 0x99, 0x14, 0xbf, 0xa8, 0x68, 0x26, 0x36, 0x7a, 0x01, 0xcb, 0xf9, 0xf9,
0x89, 0x10, 0x8c, 0xbb, 0x25, 0x25, 0xf8, 0x83, 0x79, 0xc1, 0xed, 0xa4, 0x82, 0x99, 0x98, 0xa7,
0x89, 0x60, 0x63, 0x3c, 0xcb, 0x23, 0xb5, 0x3e, 0x20, 0x9c, 0xcb, 0x0c, 0xb5, 0x50, 0xb9, 0x29,
0xd3, 0xf9, 0x8a, 0xd1, 0x44, 0x90, 0xa4, 0xaf, 0x84, 0xaa, 0xe1, 0x89, 0x2d, 0xd3, 0xc9, 0xcf,
0x3a, 0x9d, 0xca, 0xb5, 0xd2, 0x99, 0x89, 0x31, 0xe9, 0xcc, 0xf8, 0xd0, 0x16, 0x2c, 0x6d, 0x47,
0xbd, 0x97, 0x44, 0x69, 0x52, 0xdf, 0x5c, 0x9b, 0x27, 0x54, 0xff, 0xfe, 0x46, 0x89, 0xc0, 0xdb,
0x25, 0xd9, 0x1e, 0x58, 0x87, 0x78, 0x8f, 0x01, 0xcd, 0xdf, 0x57, 0xea, 0x72, 0x4c, 0xc6, 0xb9,
0x2e, 0xc7, 0x64, 0x2c, 0x9b, 0xf8, 0x24, 0x1a, 0x66, 0xba, 0xb9, 0x6b, 0x58, 0x1b, 0x5b, 0x85,
0x2f, 0x1c, 0xc9, 0x30, 0x9f, 0xe2, 0x4d, 0x18, 0xfc, 0x37, 0x0e, 0x34, 0xec, 0x0c, 0xd1, 0x07,
0x50, 0xd3, 0x49, 0x4d, 0x9b, 0x63, 0xea, 0x90, 0x7d, 0xb8, 0x33, 0x32, 0x06, 0x77, 0x0b, 0xcd,
0x62, 0xab, 0x86, 0x2d, 0x0f, 0xfa, 0x16, 0xea, 0x1a, 0xac, 0xab, 0x5c, 0x54, 0x55, 0x0e, 0x2f,
0x2f, 0x4a, 0x60, 0x45, 0xe8, 0x1a, 0xdb, 0x1c, 0xde, 0x23, 0x58, 0x3d, 0x0f, 0xb8, 0xd1, 0x0d,
0x7f, 0x73, 0x60, 0xd9, 0x88, 0x6a, 0x5e, 0xa1, 0x28, 0x67, 0x24, 0x2c, 0xf7, 0x99, 0xf7, 0xe8,
0xe1, 0x85, 0xfd, 0xa0, 0x61, 0xc1, 0xf9, 0x38, 0x9d, 0xef, 0x1c, 0x9d, 0xb7, 0x0d, 0xef, 0x2d,
0x84, 0xde, 0x28, 0xf3, 0x8f, 0x60, 0xf9, 0x40, 0x44, 0x22, 0xe3, 0x17, 0xfe, 0x64, 0xfd, 0x5f,
0x1d, 0x58, 0xc9, 0x31, 0xe6, 0x76, 0x9f, 0x43, 0xf5, 0x84, 0x30, 0x41, 0x5e, 0x11, 0x6e, 0x6e,
0xe5, 0xce, 0xdf, 0xea, 0x3b, 0x85, 0xc0, 0x13, 0x24, 0xda, 0x82, 0x2a, 0x57, 0x3c, 0x44, 0xcb,
0xba, 0xb0, 0x95, 0x75, 0x94, 0xf9, 0xde, 0x04, 0x8f, 0x42, 0x28, 0x0d, 0xe9, 0x20, 0x57, 0xfb,
0xfd, 0x8b, 0xe2, 0xf6, 0xe8, 0x00, 0x2b, 0xa0, 0x7f, 0x5a, 0x80, 0xb2, 0xf6, 0xa1, 0x5d, 0x28,
0xf7, 0xe3, 0x01, 0xe1, 0x42, 0xdf, 0xaa, 0xbd, 0x29, 0x7f, 0x20, 0x7f, 0x9d, 0xae, 0xdf, 0xb5,
0x66, 0x15, 0x4d, 0x49, 0x22, 0x27, 0x6b, 0x14, 0x27, 0x84, 0xf1, 0x70, 0x40, 0xef, 0xeb, 0x90,
0xa0, 0xa3, 0xfe, 0x60, 0xc3, 0x20, 0xb9, 0xe2, 0x24, 0xcd, 0x84, 0x69, 0xcc, 0xdb, 0x71, 0x69,
0x06, 0x39, 0x22, 0x92, 0x68, 0x44, 0xcc, 0xbb, 0xa6, 0xce, 0x72, 0x44, 0xf4, 0x64, 0xdf, 0xf6,
0xd5, 0xe0, 0xa8, 0x62, 0x63, 0xa1, 0x2d, 0xa8, 0x70, 0x11, 0x31, 0x41, 0xfa, 0xea, 0x49, 0xba,
0xce, 0xdb, 0x9e, 0x07, 0xa0, 0x47, 0x50, 0xeb, 0xd1, 0x51, 0x3a, 0x24, 0x32, 0xba, 0x7c, 0xcd,
0xe8, 0x69, 0x88, 0xec, 0x1e, 0xc2, 0x18, 0x65, 0x6a, 0xaa, 0xd4, 0xb0, 0x36, 0xfc, 0x7f, 0x0b,
0xd0, 0xb0, 0xc5, 0x9a, 0x9b, 0x98, 0xbb, 0x50, 0xd6, 0xd2, 0xeb, 0xae, 0xbb, 0x5d, 0xa9, 0x34,
0xc3, 0xc2, 0x52, 0xb9, 0x50, 0xe9, 0x65, 0x4c, 0x8d, 0x53, 0x3d, 0x64, 0x73, 0x53, 0x26, 0x2c,
0xa8, 0x88, 0x86, 0xaa, 0x54, 0x45, 0xac, 0x0d, 0x39, 0x65, 0x27, 0xbb, 0xcd, 0xcd, 0xa6, 0xec,
0x24, 0xcc, 0x96, 0xa1, 0xf2, 0x56, 0x32, 0x54, 0x6f, 0x2c, 0x83, 0xff, 0x87, 0x03, 0xb5, 0x49,
0x97, 0x5b, 0xd5, 0x75, 0xde, 0xba, 0xba, 0x33, 0x95, 0x29, 0xdc, 0xae, 0x32, 0x77, 0xa0, 0xcc,
0x05, 0x23, 0xd1, 0x48, 0x69, 0x54, 0xc4, 0xc6, 0x92, 0xef, 0xc9, 0x88, 0x0f, 0x94, 0x42, 0x0d,
0x2c, 0x8f, 0xbe, 0x0f, 0x8d, 0xf6, 0x58, 0x10, 0xbe, 0x4f, 0xb8, 0x5c, 0x2e, 0xa4, 0xb6, 0xfd,
0x48, 0x44, 0xea, 0x1e, 0x0d, 0xac, 0xce, 0xfe, 0x3d, 0x40, 0x7b, 0x31, 0x17, 0x2f, 0xd4, 0xa6,
0xc8, 0x17, 0xed, 0x81, 0x45, 0x6b, 0x0f, 0x3c, 0x80, 0x77, 0x66, 0xd0, 0xe6, 0x95, 0xfa, 0xf2,
0xdc, 0x26, 0xf8, 0xc9, 0xfc, 0xab, 0xa1, 0x16, 0xd2, 0x40, 0x07, 0xce, 0x2e, 0x84, 0x9b, 0xff,
0x14, 0xa1, 0xb2, 0xad, 0x77, 0x6d, 0xf4, 0x1c, 0x6a, 0x93, 0x45, 0x13, 0xf9, 0xf3, 0x34, 0xe7,
0x37, 0x56, 0xef, 0xe3, 0x4b, 0x31, 0x26, 0xbf, 0xaf, 0x61, 0x49, 0xad, 0xbe, 0x68, 0xc1, 0x33,
0x68, 0xef, 0xc4, 0xde, 0xe5, 0x2b, 0xec, 0x86, 0x23, 0x99, 0xd4, 0x0c, 0x59, 0xc4, 0x64, 0x2f,
0x1b, 0xde, 0xfa, 0x15, 0xc3, 0x07, 0xed, 0x43, 0xd9, 0xfc, 0x9c, 0x17, 0x41, 0xed, 0x49, 0xe1,
0x35, 0x2f, 0x06, 0x68, 0xb2, 0x0d, 0x07, 0xed, 0x4f, 0x36, 0xa9, 0x45, 0xa9, 0xd9, 0x6d, 0xe0,
0x5d, 0xf1, 0xff, 0x96, 0xb3, 0xe1, 0xa0, 0xef, 0xa1, 0x6e, 0x09, 0x8d, 0x16, 0x08, 0x3a, 0xdf,
0x35, 0xde, 0xa7, 0x57, 0xa0, 0x74, 0xb2, 0xed, 0xc6, 0xeb, 0xb3, 0x35, 0xe7, 0xcf, 0xb3, 0x35,
0xe7, 0xef, 0xb3, 0x35, 0xa7, 0x5b, 0x56, 0x7d, 0xff, 0xd9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
0xe1, 0xef, 0xcc, 0xf5, 0x6f, 0x0d, 0x00, 0x00,
// 1206 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
0x14, 0x67, 0x6d, 0xc7, 0x7f, 0x9e, 0x9d, 0x2a, 0x0c, 0x50, 0xad, 0x16, 0x48, 0xcc, 0x02, 0x92,
0x55, 0xb5, 0xbb, 0x6d, 0xa0, 0x52, 0x15, 0xa1, 0xaa, 0x75, 0x5c, 0x44, 0xaa, 0x44, 0x94, 0x75,
0x42, 0x25, 0x6e, 0x6b, 0x7b, 0xe2, 0xac, 0xb2, 0xde, 0x59, 0x66, 0x66, 0x43, 0xcd, 0xa7, 0xe0,
0xc0, 0x37, 0xe1, 0xc0, 0x27, 0x40, 0xea, 0x91, 0x33, 0x87, 0x14, 0xe5, 0x0e, 0x77, 0x6e, 0x68,
0xfe, 0xac, 0x3d, 0x8e, 0x9d, 0x38, 0x49, 0x4f, 0x99, 0x37, 0xf9, 0xbd, 0x9f, 0xdf, 0x7b, 0xbf,
0xb7, 0xf3, 0x1e, 0xac, 0xf6, 0x49, 0xc2, 0x29, 0x89, 0xbd, 0x94, 0x12, 0x4e, 0xd0, 0xda, 0x88,
0xf4, 0xc6, 0x5e, 0x2f, 0x8b, 0xe2, 0xc1, 0x71, 0xc4, 0xbd, 0x93, 0x07, 0xce, 0xbd, 0x61, 0xc4,
0x8f, 0xb2, 0x9e, 0xd7, 0x27, 0x23, 0x7f, 0x48, 0x86, 0xc4, 0x97, 0xc0, 0x5e, 0x76, 0x28, 0x2d,
0x69, 0xc8, 0x93, 0x22, 0x70, 0x36, 0x86, 0x84, 0x0c, 0x63, 0x3c, 0x45, 0xf1, 0x68, 0x84, 0x19,
0x0f, 0x47, 0xa9, 0x06, 0xdc, 0x35, 0xf8, 0xc4, 0x8f, 0xf9, 0xf9, 0x8f, 0xf9, 0x8c, 0xc4, 0x27,
0x98, 0xfa, 0x69, 0xcf, 0x27, 0x29, 0xd3, 0x68, 0xff, 0x42, 0x74, 0x98, 0x46, 0x3e, 0x1f, 0xa7,
0x98, 0xf9, 0x3f, 0x11, 0x7a, 0x8c, 0xa9, 0x72, 0x70, 0x1f, 0x41, 0xe3, 0x05, 0xcd, 0x12, 0x1c,
0xe0, 0x1f, 0x33, 0xcc, 0x38, 0xba, 0x0d, 0xe5, 0xc3, 0x28, 0xe6, 0x98, 0xda, 0x56, 0xb3, 0xd8,
0xaa, 0x05, 0xda, 0x42, 0x6b, 0x50, 0x0c, 0xe3, 0xd8, 0x2e, 0x34, 0xad, 0x56, 0x35, 0x10, 0x47,
0xf7, 0x0e, 0xac, 0x75, 0x22, 0x76, 0x7c, 0xc0, 0xc2, 0xe1, 0x32, 0x6f, 0xf7, 0x39, 0xbc, 0x6b,
0x60, 0x59, 0x4a, 0x12, 0x86, 0xd1, 0x43, 0x28, 0x53, 0xdc, 0x27, 0x74, 0x20, 0xc1, 0xf5, 0xcd,
0x8f, 0xbd, 0xf3, 0xc5, 0xf4, 0xb4, 0x83, 0x00, 0x05, 0x1a, 0xec, 0xfe, 0x57, 0x80, 0xba, 0x71,
0x8f, 0x6e, 0x41, 0x61, 0xa7, 0x63, 0x5b, 0x4d, 0xab, 0x55, 0x0b, 0x0a, 0x3b, 0x1d, 0x64, 0x43,
0x65, 0x2f, 0xe3, 0x61, 0x2f, 0xc6, 0x3a, 0xda, 0xdc, 0x44, 0xef, 0xc3, 0xca, 0x4e, 0x72, 0xc0,
0xb0, 0x5d, 0x94, 0xf7, 0xca, 0x40, 0x08, 0x4a, 0xdd, 0xe8, 0x67, 0x6c, 0x97, 0x9a, 0x56, 0xab,
0x18, 0xc8, 0xb3, 0xc8, 0xe3, 0x45, 0x48, 0x71, 0xc2, 0xed, 0x15, 0xc9, 0xab, 0x2d, 0xd4, 0x86,
0xda, 0x36, 0xc5, 0x21, 0xc7, 0x83, 0xa7, 0xdc, 0x2e, 0x37, 0xad, 0x56, 0x7d, 0xd3, 0xf1, 0x94,
0x82, 0x5e, 0xae, 0xa0, 0xb7, 0x9f, 0x2b, 0xd8, 0xae, 0xbe, 0x3e, 0xdd, 0x78, 0xe7, 0x97, 0x37,
0x1b, 0x56, 0x30, 0x75, 0x43, 0x4f, 0x00, 0x76, 0x43, 0xc6, 0x0f, 0x98, 0x24, 0xa9, 0x2c, 0x25,
0x29, 0x49, 0x02, 0xc3, 0x07, 0xad, 0x03, 0xc8, 0x02, 0x6c, 0x93, 0x2c, 0xe1, 0x76, 0x55, 0xc6,
0x6d, 0xdc, 0xa0, 0x26, 0xd4, 0x3b, 0x98, 0xf5, 0x69, 0x94, 0xf2, 0x88, 0x24, 0x76, 0x4d, 0xa6,
0x60, 0x5e, 0x09, 0x06, 0x55, 0xbd, 0xfd, 0x71, 0x8a, 0x6d, 0x90, 0x00, 0xe3, 0x46, 0xe4, 0xdf,
0x3d, 0x0a, 0x29, 0x1e, 0xd8, 0x75, 0x59, 0x2a, 0x6d, 0xb9, 0xbf, 0x96, 0xa0, 0xd1, 0x15, 0x6d,
0x97, 0x0b, 0xbe, 0x06, 0xc5, 0x00, 0x1f, 0xea, 0xea, 0x8b, 0x23, 0xf2, 0x00, 0x3a, 0xf8, 0x30,
0x4a, 0x22, 0xf9, 0xdb, 0x05, 0x99, 0xde, 0x2d, 0x2f, 0xed, 0x79, 0xd3, 0xdb, 0xc0, 0x40, 0x20,
0x07, 0xaa, 0xcf, 0x5e, 0xa5, 0x84, 0x8a, 0xa6, 0x29, 0x4a, 0x9a, 0x89, 0x8d, 0x5e, 0xc2, 0x6a,
0x7e, 0x7e, 0xca, 0x39, 0x65, 0x76, 0x49, 0x36, 0xca, 0x83, 0xf9, 0x46, 0x31, 0x83, 0xf2, 0x66,
0x7c, 0x9e, 0x25, 0x9c, 0x8e, 0x83, 0x59, 0x1e, 0xd1, 0x23, 0x5d, 0xcc, 0x98, 0x88, 0x50, 0x09,
0x9c, 0x9b, 0x22, 0x9c, 0xaf, 0x29, 0x49, 0x38, 0x4e, 0x06, 0x52, 0xe0, 0x5a, 0x30, 0xb1, 0x45,
0x38, 0xf9, 0x59, 0x85, 0x53, 0xb9, 0x52, 0x38, 0x33, 0x3e, 0x3a, 0x9c, 0x99, 0x3b, 0xb4, 0x05,
0x2b, 0xdb, 0x61, 0xff, 0x08, 0x4b, 0x2d, 0xeb, 0x9b, 0xeb, 0xf3, 0x84, 0xf2, 0xdf, 0xdf, 0x4a,
0xf1, 0x58, 0xbb, 0x24, 0xda, 0x2a, 0x50, 0x2e, 0xce, 0x13, 0x40, 0xf3, 0xf9, 0x0a, 0x5d, 0x8e,
0xf1, 0x38, 0xd7, 0xe5, 0x18, 0x8f, 0x45, 0xf3, 0x9f, 0x84, 0x71, 0xa6, 0x3e, 0x8a, 0x5a, 0xa0,
0x8c, 0xad, 0xc2, 0x23, 0x4b, 0x30, 0xcc, 0x87, 0x78, 0x1d, 0x06, 0xf7, 0x8d, 0x05, 0x0d, 0x33,
0x42, 0xf4, 0x11, 0xd4, 0x54, 0x50, 0xd3, 0xe6, 0x98, 0x5e, 0x88, 0xee, 0xdb, 0x19, 0x69, 0x83,
0xd9, 0x05, 0xf9, 0x52, 0x18, 0x37, 0xe8, 0x3b, 0xa8, 0x2b, 0xb0, 0xaa, 0x72, 0x51, 0x56, 0xd9,
0xbf, 0xbc, 0x28, 0x9e, 0xe1, 0xa1, 0x6a, 0x6c, 0x72, 0x38, 0x8f, 0x61, 0xed, 0x3c, 0xe0, 0x5a,
0x19, 0xfe, 0x6e, 0xc1, 0xaa, 0x16, 0x55, 0xbf, 0x5e, 0x61, 0xce, 0x88, 0x69, 0x7e, 0xa7, 0xdf,
0xb1, 0x87, 0x17, 0xf6, 0x83, 0x82, 0x79, 0xe7, 0xfd, 0x54, 0xbc, 0x73, 0x74, 0xce, 0x36, 0x7c,
0xb0, 0x10, 0x7a, 0xad, 0xc8, 0x3f, 0x81, 0xd5, 0x2e, 0x0f, 0x79, 0xc6, 0x2e, 0xfc, 0x64, 0xdd,
0xdf, 0x2c, 0xb8, 0x95, 0x63, 0x74, 0x76, 0x5f, 0x42, 0xf5, 0x04, 0x53, 0x8e, 0x5f, 0x61, 0xa6,
0xb3, 0xb2, 0xe7, 0xb3, 0xfa, 0x5e, 0x22, 0x82, 0x09, 0x12, 0x6d, 0x41, 0x95, 0x49, 0x1e, 0xac,
0x64, 0x5d, 0xd8, 0xca, 0xca, 0x4b, 0xff, 0xde, 0x04, 0x8f, 0x7c, 0x28, 0xc5, 0x64, 0x98, 0xab,
0xfd, 0xe1, 0x45, 0x7e, 0xbb, 0x64, 0x18, 0x48, 0xa0, 0x7b, 0x5a, 0x80, 0xb2, 0xba, 0x43, 0xcf,
0xa1, 0x3c, 0x88, 0x86, 0x98, 0x71, 0x95, 0x55, 0x7b, 0x53, 0x7c, 0x20, 0x7f, 0x9d, 0x6e, 0xdc,
0x31, 0xa6, 0x21, 0x49, 0x71, 0x22, 0x66, 0x77, 0x18, 0x25, 0x98, 0x32, 0x7f, 0x48, 0xee, 0x29,
0x17, 0xaf, 0x23, 0xff, 0x04, 0x9a, 0x41, 0x70, 0x45, 0x49, 0x9a, 0x71, 0xdd, 0x98, 0x37, 0xe3,
0x52, 0x0c, 0x62, 0xb4, 0x24, 0xe1, 0x08, 0xeb, 0x77, 0x4d, 0x9e, 0xc5, 0xd3, 0xda, 0x17, 0x7d,
0x3b, 0x90, 0x03, 0xa7, 0x1a, 0x68, 0x0b, 0x6d, 0x41, 0x85, 0xf1, 0x90, 0x72, 0x3c, 0x90, 0x4f,
0xd2, 0x55, 0x66, 0x42, 0xee, 0x80, 0x1e, 0x43, 0xad, 0x4f, 0x46, 0x69, 0x8c, 0x85, 0x77, 0xf9,
0x8a, 0xde, 0x53, 0x17, 0xd1, 0x3d, 0x98, 0x52, 0x42, 0xe5, 0x34, 0xaa, 0x05, 0xca, 0x70, 0xff,
0x2d, 0x40, 0xc3, 0x14, 0x6b, 0x6e, 0xd2, 0x3e, 0x87, 0xb2, 0x92, 0x5e, 0x75, 0xdd, 0xcd, 0x4a,
0xa5, 0x18, 0x16, 0x96, 0xca, 0x86, 0x4a, 0x3f, 0xa3, 0x72, 0x0c, 0xab, 0xe1, 0x9c, 0x9b, 0x22,
0x60, 0x4e, 0x78, 0x18, 0xcb, 0x52, 0x15, 0x03, 0x65, 0x88, 0xe9, 0x3c, 0xd9, 0x9e, 0xae, 0x37,
0x9d, 0x27, 0x6e, 0xa6, 0x0c, 0x95, 0xb7, 0x92, 0xa1, 0x7a, 0x6d, 0x19, 0xdc, 0x3f, 0x2c, 0xa8,
0x4d, 0xba, 0xdc, 0xa8, 0xae, 0xf5, 0xd6, 0xd5, 0x9d, 0xa9, 0x4c, 0xe1, 0x66, 0x95, 0xb9, 0x0d,
0x65, 0xc6, 0x29, 0x0e, 0x47, 0x52, 0xa3, 0x62, 0xa0, 0x2d, 0xf1, 0x9e, 0x8c, 0xd8, 0x50, 0x2a,
0xd4, 0x08, 0xc4, 0xd1, 0x75, 0xa1, 0xd1, 0x1e, 0x73, 0xcc, 0xf6, 0x30, 0x13, 0x4b, 0x89, 0xd0,
0x76, 0x10, 0xf2, 0x50, 0xe6, 0xd1, 0x08, 0xe4, 0xd9, 0xbd, 0x0b, 0x68, 0x37, 0x62, 0xfc, 0xa5,
0xdc, 0x45, 0xd9, 0xb2, 0xfd, 0xb1, 0x0b, 0xef, 0xcd, 0xa0, 0xf5, 0x2b, 0xf5, 0xd5, 0xb9, 0x0d,
0xf2, 0xb3, 0xf9, 0x57, 0x43, 0xae, 0xbc, 0x9e, 0x72, 0x9c, 0x5d, 0x24, 0x37, 0xff, 0x29, 0x42,
0x65, 0x5b, 0x6d, 0xf3, 0x68, 0x1f, 0x6a, 0x93, 0x05, 0x15, 0xb9, 0xf3, 0x34, 0xe7, 0x37, 0x5d,
0xe7, 0xd3, 0x4b, 0x31, 0x3a, 0xbe, 0x6f, 0x60, 0x45, 0x2e, 0xd7, 0x68, 0xc1, 0x33, 0x68, 0x6e,
0xdd, 0xce, 0xe5, 0xab, 0xef, 0x7d, 0x4b, 0x30, 0xc9, 0x19, 0xb2, 0x88, 0xc9, 0x5c, 0x36, 0x9c,
0x8d, 0x25, 0xc3, 0x07, 0xed, 0x41, 0x59, 0x7f, 0xce, 0x8b, 0xa0, 0xe6, 0xa4, 0x70, 0x9a, 0x17,
0x03, 0x14, 0xd9, 0x7d, 0x0b, 0xed, 0x4d, 0x36, 0xa9, 0x45, 0xa1, 0x99, 0x6d, 0xe0, 0x2c, 0xf9,
0x7f, 0xcb, 0xba, 0x6f, 0xa1, 0x1f, 0xa0, 0x6e, 0x08, 0x8d, 0x16, 0x08, 0x3a, 0xdf, 0x35, 0xce,
0xe7, 0x4b, 0x50, 0x2a, 0xd8, 0x76, 0xe3, 0xf5, 0xd9, 0xba, 0xf5, 0xe7, 0xd9, 0xba, 0xf5, 0xf7,
0xd9, 0xba, 0xd5, 0x2b, 0xcb, 0xbe, 0xff, 0xe2, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x10, 0x8f,
0x03, 0xa4, 0xd1, 0x0d, 0x00, 0x00,
}


@ -25,11 +25,12 @@ service Control {
}
message PruneRequest {
// TODO: filter
repeated string filter = 1; // FIXME: not implemented
bool all = 2;
}
message DiskUsageRequest {
string filter = 1; // FIXME: this should be containerd-compatible repeated string?
repeated string filter = 1;
}
message DiskUsageResponse {
@ -46,6 +47,8 @@ message UsageRecord {
google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
int64 UsageCount = 8;
string Description = 9;
string RecordType = 10;
bool Shared = 11;
}
message SolveRequest {


@ -2,10 +2,10 @@ package cache
import (
"context"
"strings"
"sync"
"time"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/snapshots"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
@ -23,9 +23,10 @@ var (
)
type ManagerOpt struct {
Snapshotter snapshot.SnapshotterBase
GCPolicy GCPolicy
MetadataStore *metadata.Store
Snapshotter snapshot.SnapshotterBase
GCPolicy GCPolicy
MetadataStore *metadata.Store
PruneRefChecker ExternalRefCheckerFunc
}
type Accessor interface {
@ -37,7 +38,7 @@ type Accessor interface {
type Controller interface {
DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error)
Prune(ctx context.Context, ch chan client.UsageInfo) error
Prune(ctx context.Context, ch chan client.UsageInfo, info client.PruneInfo) error
GC(ctx context.Context) error
}
@ -47,6 +48,12 @@ type Manager interface {
Close() error
}
type ExternalRefCheckerFunc func() (ExternalRefChecker, error)
type ExternalRefChecker interface {
Exists(key string) bool
}
type cacheManager struct {
records map[string]*cacheRecord
mu sync.Mutex
@ -296,13 +303,28 @@ func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef,
return rec.mref(), nil
}
func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo) error {
func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opt client.PruneInfo) error {
cm.muPrune.Lock()
defer cm.muPrune.Unlock()
return cm.prune(ctx, ch)
filter, err := filters.ParseAll(opt.Filter...)
if err != nil {
return err
}
var check ExternalRefChecker
if f := cm.PruneRefChecker; f != nil && (!opt.All || len(opt.Filter) > 0) {
c, err := f()
if err != nil {
return err
}
check = c
}
return cm.prune(ctx, ch, filter, opt.All, check)
}
func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) error {
func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, filter filters.Filter, all bool, checkShared ExternalRefChecker) error {
var toDelete []*cacheRecord
cm.mu.Lock()
@ -321,16 +343,44 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) err
}
if len(cr.refs) == 0 {
cr.dead = true
toDelete = append(toDelete, cr)
recordType := GetRecordType(cr)
if recordType == "" {
recordType = client.UsageRecordTypeRegular
}
shared := false
if checkShared != nil {
shared = checkShared.Exists(cr.ID())
}
if !all {
if recordType == client.UsageRecordTypeInternal || recordType == client.UsageRecordTypeFrontend || shared {
cr.mu.Unlock()
continue
}
}
c := &client.UsageInfo{
ID: cr.ID(),
Mutable: cr.mutable,
RecordType: recordType,
Shared: shared,
}
if filter.Match(adaptUsageInfo(c)) {
cr.dead = true
toDelete = append(toDelete, cr)
// mark metadata as deleted in case we crash before cleanup finished
if err := setDeleted(cr.md); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
}
}
// mark metadata as deleted in case we crash before cleanup finished
if err := setDeleted(cr.md); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
cr.mu.Unlock()
}
@ -393,24 +443,61 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) err
case <-ctx.Done():
return ctx.Err()
default:
return cm.prune(ctx, ch)
return cm.prune(ctx, ch, filter, all, checkShared)
}
}
func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
cm.mu.Lock()
type cacheUsageInfo struct {
refs int
parent string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error {
if cm.PruneRefChecker == nil {
return nil
}
c, err := cm.PruneRefChecker()
if err != nil {
return err
}
var markAllParentsShared func(string)
markAllParentsShared = func(id string) {
if v, ok := m[id]; ok {
v.shared = true
if v.parent != "" {
markAllParentsShared(v.parent)
}
}
}
for id := range m {
if m[id].shared {
continue
}
if b := c.Exists(id); b {
markAllParentsShared(id)
}
}
return nil
}
type cacheUsageInfo struct {
refs int
parent string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
recordType client.UsageRecordType
shared bool
}
func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
filter, err := filters.ParseAll(opt.Filter...)
if err != nil {
return nil, err
}
cm.mu.Lock()
m := make(map[string]*cacheUsageInfo, len(cm.records))
rescan := make(map[string]struct{}, len(cm.records))
@ -433,6 +520,10 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
lastUsedAt: lastUsedAt,
description: GetDescription(cr.md),
doubleRef: cr.equalImmutable != nil,
recordType: GetRecordType(cr),
}
if c.recordType == "" {
c.recordType = client.UsageRecordTypeRegular
}
if cr.parent != nil {
c.parent = cr.parent.ID()
@ -463,12 +554,12 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
}
}
if err := cm.markShared(m); err != nil {
return nil, err
}
var du []*client.UsageInfo
for id, cr := range m {
if opt.Filter != "" && !strings.HasPrefix(id, opt.Filter) {
continue
}
c := &client.UsageInfo{
ID: id,
Mutable: cr.mutable,
@ -479,8 +570,12 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
Description: cr.description,
LastUsedAt: cr.lastUsedAt,
UsageCount: cr.usageCount,
RecordType: cr.recordType,
Shared: cr.shared,
}
if filter.Match(adaptUsageInfo(c)) {
du = append(du, c)
}
du = append(du, c)
}
eg, ctx := errgroup.WithContext(ctx)
@ -547,6 +642,12 @@ func WithDescription(descr string) RefOption {
}
}
func WithRecordType(t client.UsageRecordType) RefOption {
return func(m withMetadata) error {
return queueRecordType(m.Metadata(), t)
}
}
func WithCreationTime(tm time.Time) RefOption {
return func(m withMetadata) error {
return queueCreatedAt(m.Metadata(), tm)
@ -571,3 +672,36 @@ func initializeMetadata(m withMetadata, opts ...RefOption) error {
return md.Commit()
}
func adaptUsageInfo(info *client.UsageInfo) filters.Adaptor {
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "id":
return info.ID, info.ID != ""
case "parent":
return info.Parent, info.Parent != ""
case "description":
return info.Description, info.Description != ""
case "inuse":
return "", info.InUse
case "mutable":
return "", info.Mutable
case "immutable":
return "", !info.Mutable
case "type":
return string(info.RecordType), info.RecordType != ""
case "shared":
return "", info.Shared
case "private":
return "", !info.Shared
}
// TODO: add int/datetime/bytes support for more fields
return "", false
})
}
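Illustrative sketch (not part of the diff): how the filter strings accepted by the new Prune/DiskUsage API are evaluated. filters.ParseAll, filters.AdapterFunc and the field names come from the hunks above; usageAdaptor and selectForPrune are example names, and the adaptor is trimmed to two fields.

package example

import (
	"fmt"

	"github.com/containerd/containerd/filters"
	"github.com/moby/buildkit/client"
)

// usageAdaptor is a trimmed-down stand-in for adaptUsageInfo above, exposing only
// the "type" and "shared" fields.
func usageAdaptor(info *client.UsageInfo) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "type":
			return string(info.RecordType), info.RecordType != ""
		case "shared":
			return "", info.Shared
		}
		return "", false
	})
}

// selectForPrune parses filter expressions the same way cacheManager.Prune does
// (filters.ParseAll) and returns the records that match, e.g.
// selectForPrune(records, "type==exec.cachemount") or selectForPrune(records, "shared").
func selectForPrune(records []*client.UsageInfo, exprs ...string) ([]*client.UsageInfo, error) {
	f, err := filters.ParseAll(exprs...)
	if err != nil {
		return nil, fmt.Errorf("invalid filter: %v", err)
	}
	var out []*client.UsageInfo
	for _, r := range records {
		if f.Match(usageAdaptor(r)) {
			out = append(out, r)
		}
	}
	return out, nil
}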


@ -5,6 +5,7 @@ import (
"github.com/boltdb/bolt"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
)
@ -16,6 +17,8 @@ const keyDescription = "cache.description"
const keyCreatedAt = "cache.createdAt"
const keyLastUsedAt = "cache.lastUsedAt"
const keyUsageCount = "cache.usageCount"
const keyLayerType = "cache.layerType"
const keyRecordType = "cache.recordType"
const keyDeleted = "cache.deleted"
@ -204,3 +207,56 @@ func updateLastUsed(si *metadata.StorageItem) error {
return si.SetValue(b, keyLastUsedAt, v2)
})
}
func SetLayerType(m withMetadata, value string) error {
v, err := metadata.NewValue(value)
if err != nil {
return errors.Wrap(err, "failed to create layertype value")
}
m.Metadata().Queue(func(b *bolt.Bucket) error {
return m.Metadata().SetValue(b, keyLayerType, v)
})
return m.Metadata().Commit()
}
func GetLayerType(m withMetadata) string {
v := m.Metadata().Get(keyLayerType)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return str
}
func GetRecordType(m withMetadata) client.UsageRecordType {
v := m.Metadata().Get(keyRecordType)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return client.UsageRecordType(str)
}
func SetRecordType(m withMetadata, value client.UsageRecordType) error {
if err := queueRecordType(m.Metadata(), value); err != nil {
return err
}
return m.Metadata().Commit()
}
func queueRecordType(si *metadata.StorageItem, value client.UsageRecordType) error {
v, err := metadata.NewValue(value)
if err != nil {
return errors.Wrap(err, "failed to create recordtype value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyRecordType, v)
})
return nil
}
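Illustrative sketch (not part of the diff): how the new record-type helpers are used; a minimal pair of helpers assuming they sit in the same cache package as GetRecordType, SetRecordType and withMetadata above.

package cache

import "github.com/moby/buildkit/client"

// markAsLocalSource tags a record so a default (non-all) prune treats it as a
// reclaimable local-source snapshot rather than an internal record.
func markAsLocalSource(m withMetadata) error {
	return SetRecordType(m, client.UsageRecordTypeLocalSource)
}

// recordTypeOf reads the stored type, falling back to "regular" when nothing was
// recorded — the same defaulting Prune and DiskUsage apply earlier in this diff.
func recordTypeOf(m withMetadata) client.UsageRecordType {
	if t := GetRecordType(m); t != "" {
		return t
	}
	return client.UsageRecordTypeRegular
}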


@ -20,12 +20,14 @@ type UsageInfo struct {
UsageCount int
Parent string
Description string
RecordType UsageRecordType
Shared bool
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
info := &DiskUsageInfo{}
for _, o := range opts {
o(info)
o.SetDiskUsageOption(info)
}
req := &controlapi.DiskUsageRequest{Filter: info.Filter}
@ -47,6 +49,8 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
RecordType: UsageRecordType(d.RecordType),
Shared: d.Shared,
})
}
@ -60,14 +64,21 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa
return du, nil
}
type DiskUsageOption func(*DiskUsageInfo)
type DiskUsageOption interface {
SetDiskUsageOption(*DiskUsageInfo)
}
type DiskUsageInfo struct {
Filter string
Filter []string
}
func WithFilter(f string) DiskUsageOption {
return func(di *DiskUsageInfo) {
di.Filter = f
}
}
type UsageRecordType string
const (
UsageRecordTypeInternal UsageRecordType = "internal"
UsageRecordTypeFrontend UsageRecordType = "frontend"
UsageRecordTypeLocalSource UsageRecordType = "source.local"
UsageRecordTypeGitCheckout UsageRecordType = "source.git.checkout"
UsageRecordTypeCacheMount UsageRecordType = "exec.cachemount"
UsageRecordTypeRegular UsageRecordType = "regular"
)

vendor/github.com/moby/buildkit/client/filter.go (generated, vendored; new file, 19 lines added)

@ -0,0 +1,19 @@
package client
func WithFilter(f []string) Filter {
return Filter(f)
}
type Filter []string
func (f Filter) SetDiskUsageOption(di *DiskUsageInfo) {
di.Filter = f
}
func (f Filter) SetPruneOption(pi *PruneInfo) {
pi.Filter = f
}
func (f Filter) SetListWorkersOption(lwi *ListWorkersInfo) {
lwi.Filter = f
}
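Illustrative sketch (not part of the diff): the new Filter type satisfies the DiskUsage, Prune and ListWorkers option interfaces, so a single WithFilter value works across all three calls. printLocalSourceUsage is an example name; the filter expression assumes the field names exposed by adaptUsageInfo earlier in this diff.

package example

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client"
)

// printLocalSourceUsage lists cache records created from local build contexts.
func printLocalSourceUsage(ctx context.Context, c *client.Client) error {
	du, err := c.DiskUsage(ctx, client.WithFilter([]string{"type==source.local"}))
	if err != nil {
		return err
	}
	for _, d := range du {
		fmt.Printf("%s\t%s\tshared=%v\n", d.ID, d.RecordType, d.Shared)
	}
	return nil
}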


@ -57,6 +57,7 @@ type ExecOp struct {
meta Meta
constraints Constraints
isValidated bool
secrets []SecretInfo
}
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
@ -142,6 +143,23 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
FtpProxy: p.FtpProxy,
NoProxy: p.NoProxy,
}
addCap(&e.constraints, pb.CapExecMetaProxy)
}
addCap(&e.constraints, pb.CapExecMetaBase)
for _, m := range e.mounts {
if m.selector != "" {
addCap(&e.constraints, pb.CapExecMountSelector)
}
if m.cacheID != "" {
addCap(&e.constraints, pb.CapExecMountCache)
addCap(&e.constraints, pb.CapExecMountCacheSharing)
} else if m.tmpfs {
addCap(&e.constraints, pb.CapExecMountTmpfs)
} else if m.source != nil {
addCap(&e.constraints, pb.CapExecMountBind)
}
}
pop, md := MarshalConstraints(c, &e.constraints)
@ -211,6 +229,25 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
peo.Mounts = append(peo.Mounts, pm)
}
if len(e.secrets) > 0 {
addCap(&e.constraints, pb.CapMountSecret)
}
for _, s := range e.secrets {
pm := &pb.Mount{
Dest: s.Target,
MountType: pb.MountType_SECRET,
SecretOpt: &pb.SecretOpt{
ID: s.ID,
Uid: uint32(s.UID),
Gid: uint32(s.GID),
Optional: s.Optional,
Mode: uint32(s.Mode),
},
}
peo.Mounts = append(peo.Mounts, pm)
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
@ -367,6 +404,53 @@ func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
})
}
func AddSecret(dest string, opts ...SecretOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
s := &SecretInfo{ID: dest, Target: dest, Mode: 0400}
for _, opt := range opts {
opt.SetSecretOption(s)
}
ei.Secrets = append(ei.Secrets, *s)
})
}
type SecretOption interface {
SetSecretOption(*SecretInfo)
}
type secretOptionFunc func(*SecretInfo)
func (fn secretOptionFunc) SetSecretOption(si *SecretInfo) {
fn(si)
}
type SecretInfo struct {
ID string
Target string
Mode int
UID int
GID int
Optional bool
}
var SecretOptional = secretOptionFunc(func(si *SecretInfo) {
si.Optional = true
})
func SecretID(id string) SecretOption {
return secretOptionFunc(func(si *SecretInfo) {
si.ID = id
})
}
func SecretFileOpt(uid, gid, mode int) SecretOption {
return secretOptionFunc(func(si *SecretInfo) {
si.UID = uid
si.GID = gid
si.Mode = mode
})
}
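Illustrative sketch (not part of the diff): wiring the new secret mount options into a RUN step. AddSecret, SecretID and SecretFileOpt come from the hunk above; the image name, command and secret ID are placeholders.

package example

import "github.com/moby/buildkit/client/llb"

// buildWithSecret mounts a session-provided secret at /run/secrets/netrc for a
// single exec and marshals the resulting state.
func buildWithSecret() (*llb.Definition, error) {
	st := llb.Image("docker.io/library/alpine:latest").
		Run(
			llb.Shlex("cat /run/secrets/netrc"),
			llb.AddSecret("/run/secrets/netrc",
				llb.SecretID("netrc"),         // ID looked up in the client session
				llb.SecretFileOpt(0, 0, 0400), // uid, gid, mode inside the container
			),
		).Root()
	return st.Marshal()
}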
func ReadonlyRootFS() RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ReadonlyRootFS = true
@ -385,6 +469,7 @@ type ExecInfo struct {
Mounts []MountInfo
ReadonlyRootFS bool
ProxyEnv *ProxyEnv
Secrets []SecretInfo
}
type MountInfo struct {


@ -5,10 +5,12 @@ import (
"net/http"
"sync"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/client/llb"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/imageutil"
digest "github.com/opencontainers/go-digest"
@ -18,7 +20,7 @@ import (
var defaultImageMetaResolver llb.ImageMetaResolver
var defaultImageMetaResolverOnce sync.Once
var WithDefault = llb.ImageOptionFunc(func(ii *llb.ImageInfo) {
var WithDefault = imageOptionFunc(func(ii *llb.ImageInfo) {
llb.WithMetaResolver(Default()).SetImageOption(ii)
})
@ -70,23 +72,39 @@ type resolveResult struct {
dgst digest.Digest
}
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error) {
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
imr.locker.Lock(ref)
defer imr.locker.Unlock(ref)
if res, ok := imr.cache[ref]; ok {
return res.dgst, res.config, nil
}
platform := opt.Platform
if platform == nil {
platform = imr.platform
}
k := imr.key(ref, platform)
if res, ok := imr.cache[k]; ok {
return res.dgst, res.config, nil
}
dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, platform)
if err != nil {
return "", nil, err
}
imr.cache[ref] = resolveResult{dgst: dgst, config: config}
imr.cache[k] = resolveResult{dgst: dgst, config: config}
return dgst, config, nil
}
func (imr *imageMetaResolver) key(ref string, platform *specs.Platform) string {
if platform != nil {
ref += platforms.Format(*platform)
}
return ref
}
type imageOptionFunc func(*llb.ImageInfo)
func (fn imageOptionFunc) SetImageOption(ii *llb.ImageInfo) {
fn(ii)
}


@ -3,16 +3,16 @@ package llb
import (
"context"
gw "github.com/moby/buildkit/frontend/gateway/client"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
return ImageOptionFunc(func(ii *ImageInfo) {
return imageOptionFunc(func(ii *ImageInfo) {
ii.metaResolver = mr
})
}
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
}


@ -9,7 +9,9 @@ import (
"strings"
"github.com/docker/distribution/reference"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -58,6 +60,7 @@ func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb
uid = constraints.LocalUniqueID
}
s.attrs[pb.AttrLocalUniqueID] = uid
addCap(&s.constraints, pb.CapSourceLocalUnique)
}
}
proto, md := MarshalConstraints(constraints, &s.constraints)
@ -65,6 +68,11 @@ func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb
proto.Op = &pb.Op_Source{
Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
}
if !platformSpecificSource(s.id) {
proto.Platform = nil
}
dt, err := proto.Marshal()
if err != nil {
return "", nil, nil, err
@ -91,12 +99,30 @@ func Image(ref string, opts ...ImageOption) State {
for _, opt := range opts {
opt.SetImageOption(&info)
}
src := NewSource("docker-image://"+ref, nil, info.Constraints) // controversial
addCap(&info.Constraints, pb.CapSourceImage)
attrs := map[string]string{}
if info.resolveMode != 0 {
attrs[pb.AttrImageResolveMode] = info.resolveMode.String()
if info.resolveMode == ResolveModeForcePull {
addCap(&info.Constraints, pb.CapSourceImageResolveMode) // only require cap for security enforced mode
}
}
if info.RecordType != "" {
attrs[pb.AttrImageRecordType] = info.RecordType
}
src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial
if err != nil {
src.err = err
}
if info.metaResolver != nil {
_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, info.Constraints.Platform)
_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, gw.ResolveImageConfigOpt{
Platform: info.Constraints.Platform,
ResolveMode: info.resolveMode.String(),
})
if err != nil {
src.err = err
} else {
@ -133,15 +159,46 @@ type ImageOption interface {
SetImageOption(*ImageInfo)
}
type ImageOptionFunc func(*ImageInfo)
type imageOptionFunc func(*ImageInfo)
func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
func (fn imageOptionFunc) SetImageOption(ii *ImageInfo) {
fn(ii)
}
var MarkImageInternal = imageOptionFunc(func(ii *ImageInfo) {
ii.RecordType = "internal"
})
type ResolveMode int
const (
ResolveModeDefault ResolveMode = iota
ResolveModeForcePull
ResolveModePreferLocal
)
func (r ResolveMode) SetImageOption(ii *ImageInfo) {
ii.resolveMode = r
}
func (r ResolveMode) String() string {
switch r {
case ResolveModeDefault:
return pb.AttrImageResolveModeDefault
case ResolveModeForcePull:
return pb.AttrImageResolveModeForcePull
case ResolveModePreferLocal:
return pb.AttrImageResolveModePreferLocal
default:
return ""
}
}
type ImageInfo struct {
constraintsWrapper
metaResolver ImageMetaResolver
resolveMode ResolveMode
RecordType string
}
func Git(remote, ref string, opts ...GitOption) State {
@ -169,10 +226,15 @@ func Git(remote, ref string, opts ...GitOption) State {
attrs := map[string]string{}
if gi.KeepGitDir {
attrs[pb.AttrKeepGitDir] = "true"
addCap(&gi.Constraints, pb.CapSourceGitKeepDir)
}
if url != "" {
attrs[pb.AttrFullRemoteURL] = url
addCap(&gi.Constraints, pb.CapSourceGitFullURL)
}
addCap(&gi.Constraints, pb.CapSourceGit)
source := NewSource("git://"+id, attrs, gi.Constraints)
return NewState(source.Output())
}
@ -210,20 +272,27 @@ func Local(name string, opts ...LocalOption) State {
attrs := map[string]string{}
if gi.SessionID != "" {
attrs[pb.AttrLocalSessionID] = gi.SessionID
addCap(&gi.Constraints, pb.CapSourceLocalSessionID)
}
if gi.IncludePatterns != "" {
attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
addCap(&gi.Constraints, pb.CapSourceLocalIncludePatterns)
}
if gi.FollowPaths != "" {
attrs[pb.AttrFollowPaths] = gi.FollowPaths
addCap(&gi.Constraints, pb.CapSourceLocalFollowPaths)
}
if gi.ExcludePatterns != "" {
attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
addCap(&gi.Constraints, pb.CapSourceLocalExcludePatterns)
}
if gi.SharedKeyHint != "" {
attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
addCap(&gi.Constraints, pb.CapSourceLocalSharedKeyHint)
}
addCap(&gi.Constraints, pb.CapSourceLocal)
source := NewSource("local://"+name, attrs, gi.Constraints)
return NewState(source.Output())
}
@ -300,20 +369,25 @@ func HTTP(url string, opts ...HTTPOption) State {
attrs := map[string]string{}
if hi.Checksum != "" {
attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
addCap(&hi.Constraints, pb.CapSourceHTTPChecksum)
}
if hi.Filename != "" {
attrs[pb.AttrHTTPFilename] = hi.Filename
}
if hi.Perm != 0 {
attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
addCap(&hi.Constraints, pb.CapSourceHTTPPerm)
}
if hi.UID != 0 {
attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID)
}
if hi.UID != 0 {
if hi.GID != 0 {
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID)
}
addCap(&hi.Constraints, pb.CapSourceHTTP)
source := NewSource(url, attrs, hi.Constraints)
return NewState(source.Output())
}
@ -361,3 +435,14 @@ func Chown(uid, gid int) HTTPOption {
hi.GID = gid
})
}
func platformSpecificSource(id string) bool {
return strings.HasPrefix(id, "docker-image://")
}
func addCap(c *Constraints, id apicaps.CapID) {
if c.Metadata.Caps == nil {
c.Metadata.Caps = make(map[apicaps.CapID]bool)
}
c.Metadata.Caps[id] = true
}
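Illustrative sketch (not part of the diff): ResolveMode values implement ImageOption directly (SetImageOption above), so they can be passed straight to llb.Image; the image reference is a placeholder.

package example

import "github.com/moby/buildkit/client/llb"

// forcePulledBase builds a source op that always re-resolves the image from the
// registry; marshalling also records the matching source capability in the
// definition metadata via addCap above.
func forcePulledBase() (*llb.Definition, error) {
	st := llb.Image("docker.io/library/busybox:latest", llb.ResolveModeForcePull)
	return st.Marshal()
}

MarkImageInternal works the same way: it sets the image record type to "internal", so a default (non-all) prune keeps such helper images out of user-visible cache listings.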


@ -2,10 +2,12 @@ package llb
import (
"context"
"fmt"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
@ -100,6 +102,28 @@ func (s State) Marshal(co ...ConstraintsOpt) (*Definition, error) {
return def, err
}
def.Def = append(def.Def, dt)
dgst := digest.FromBytes(dt)
md := def.Metadata[dgst]
md.Caps = map[apicaps.CapID]bool{
pb.CapConstraints: true,
pb.CapPlatform: true,
}
for _, m := range def.Metadata {
if m.IgnoreCache {
md.Caps[pb.CapMetaIgnoreCache] = true
}
if m.Description != nil {
md.Caps[pb.CapMetaDescription] = true
}
if m.ExportCache != nil {
md.Caps[pb.CapMetaExportCache] = true
}
}
def.Metadata[dgst] = md
return def, nil
}
@ -168,6 +192,7 @@ func (s State) Run(ro ...RunOption) ExecState {
for _, m := range ei.Mounts {
exec.AddMount(m.Target, m.Source, m.Opts...)
}
exec.secrets = ei.Secrets
return ExecState{
State: s.WithOutput(exec.Output()),
@ -194,6 +219,10 @@ func (s State) GetEnv(key string) (string, bool) {
return getEnv(s).Get(key)
}
func (s State) Env() []string {
return getEnv(s).ToArray()
}
func (s State) GetDir() string {
return getDir(s)
}
@ -310,6 +339,13 @@ func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata {
m1.ExportCache = m2.ExportCache
}
for k := range m2.Caps {
if m1.Caps == nil {
m1.Caps = make(map[apicaps.CapID]bool, len(m2.Caps))
}
m1.Caps[k] = true
}
return m1
}
@ -319,7 +355,18 @@ var IgnoreCache = constraintsOptFunc(func(c *Constraints) {
func WithDescription(m map[string]string) ConstraintsOpt {
return constraintsOptFunc(func(c *Constraints) {
c.Metadata.Description = m
if c.Metadata.Description == nil {
c.Metadata.Description = map[string]string{}
}
for k, v := range m {
c.Metadata.Description[k] = v
}
})
}
func WithCustomName(name string, a ...interface{}) ConstraintsOpt {
return WithDescription(map[string]string{
"llb.customname": fmt.Sprintf(name, a...),
})
}
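Illustrative sketch (not part of the diff): inspecting the capability set that Marshal now records per vertex (see the def.Metadata changes at the top of this file's diff); printCaps is an example name and the image reference is a placeholder.

package example

import (
	"fmt"

	"github.com/moby/buildkit/client/llb"
)

// printCaps marshals a trivial state and lists the capability IDs stored in the
// definition metadata; every vertex now carries at least the constraints and
// platform caps so older workers can reject definitions they cannot honour.
func printCaps() error {
	def, err := llb.Image("docker.io/library/busybox:latest").Marshal()
	if err != nil {
		return err
	}
	for dgst, md := range def.Metadata {
		for c := range md.Caps {
			fmt.Printf("%s requires cap %v\n", dgst, c)
		}
	}
	return nil
}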


@ -11,10 +11,13 @@ import (
func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error {
info := &PruneInfo{}
for _, o := range opts {
o(info)
o.SetPruneOption(info)
}
req := &controlapi.PruneRequest{}
req := &controlapi.PruneRequest{Filter: info.Filter}
if info.All {
req.All = true
}
cl, err := c.controlClient().Prune(ctx, req)
if err != nil {
return errors.Wrap(err, "failed to call prune")
@ -39,12 +42,28 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
RecordType: UsageRecordType(d.RecordType),
Shared: d.Shared,
}
}
}
}
type PruneOption func(*PruneInfo)
type PruneOption interface {
SetPruneOption(*PruneInfo)
}
type PruneInfo struct {
Filter []string
All bool
}
type pruneOptionFunc func(*PruneInfo)
func (f pruneOptionFunc) SetPruneOption(pi *PruneInfo) {
f(pi)
}
var PruneAll = pruneOptionFunc(func(pi *PruneInfo) {
pi.All = true
})
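Illustrative sketch (not part of the diff): driving the updated Prune API from a client. PruneAll and the streamed RecordType/Shared fields come from the hunks above; pruneEverything is an example name, and it is assumed (as the partial client code above suggests) that Prune only sends on the channel and leaves closing it to the caller.

package example

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client"
)

// pruneEverything removes all build cache, printing each freed record as the
// daemon streams it back. Without client.PruneAll, internal/frontend records and
// records shared with the image store are kept.
func pruneEverything(ctx context.Context, c *client.Client) error {
	ch := make(chan client.UsageInfo)
	errCh := make(chan error, 1)
	go func() {
		errCh <- c.Prune(ctx, ch, client.PruneAll)
		close(ch) // assumption: Prune never closes the caller's channel
	}()
	for r := range ch {
		fmt.Printf("pruned %s (%s, shared=%v)\n", r.ID, r.RecordType, r.Shared)
	}
	return <-errCh
}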


@ -18,7 +18,7 @@ type WorkerInfo struct {
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
info := &ListWorkersInfo{}
for _, o := range opts {
o(info)
o.SetListWorkersOption(info)
}
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
@ -40,14 +40,10 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
return wi, nil
}
type ListWorkersOption func(*ListWorkersInfo)
type ListWorkersOption interface {
SetListWorkersOption(*ListWorkersInfo)
}
type ListWorkersInfo struct {
Filter []string
}
func WithWorkerFilter(f []string) ListWorkersOption {
return func(wi *ListWorkersInfo) {
wi.Filter = f
}
}


@ -36,10 +36,13 @@ type Opt struct {
type Controller struct { // TODO: ControlService
opt Opt
solver *llbsolver.Solver
cache solver.CacheManager
}
func NewController(opt Opt) (*Controller, error) {
solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.ResolveCacheImporterFunc)
cache := solver.NewCacheManager("local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController))
solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFunc)
if err != nil {
return nil, errors.Wrap(err, "failed to create solver")
}
@ -47,6 +50,7 @@ func NewController(opt Opt) (*Controller, error) {
c := &Controller{
opt: opt,
solver: solver,
cache: cache,
}
return c, nil
}
@ -82,6 +86,8 @@ func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageReque
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
RecordType: string(r.RecordType),
Shared: r.Shared,
})
}
}
@ -97,10 +103,26 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
return errors.Wrap(err, "failed to list workers for prune")
}
didPrune := false
defer func() {
if didPrune {
if c, ok := c.cache.(interface {
ReleaseUnreferenced() error
}); ok {
if err := c.ReleaseUnreferenced(); err != nil {
logrus.Errorf("failed to release cache metadata: %+v")
}
}
}
}()
for _, w := range workers {
func(w worker.Worker) {
eg.Go(func() error {
return w.Prune(ctx, ch)
return w.Prune(ctx, ch, client.PruneInfo{
Filter: req.Filter,
All: req.All,
})
})
}(w)
}
@ -114,6 +136,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
eg2.Go(func() error {
for r := range ch {
didPrune = true
if err := stream.Send(&controlapi.UsageRecord{
// TODO: add worker info
ID: r.ID,
@ -125,6 +148,8 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
RecordType: string(r.RecordType),
Shared: r.Shared,
}); err != nil {
return err
}


@ -167,7 +167,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
if err != nil {
return errors.Wrapf(err, "working dir %s points to invalid target", newp)
}
if err := os.MkdirAll(newp, 0700); err != nil {
if err := os.MkdirAll(newp, 0755); err != nil {
return errors.Wrapf(err, "failed to create working directory %s", newp)
}


@ -0,0 +1,15 @@
package exptypes
import specs "github.com/opencontainers/image-spec/specs-go/v1"
const ExporterImageConfigKey = "containerimage.config"
const ExporterPlatformsKey = "refs.platforms"
type Platforms struct {
Platforms []Platform
}
type Platform struct {
ID string
Platform specs.Platform
}
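A sketch of how these keys are intended to be used: the frontend marshals a Platforms value under ExporterPlatformsKey and keys each image config by the platform ID. The platform values below are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	p := exptypes.Platforms{Platforms: []exptypes.Platform{
		{ID: "linux/amd64", Platform: specs.Platform{OS: "linux", Architecture: "amd64"}},
		{ID: "linux/arm64", Platform: specs.Platform{OS: "linux", Architecture: "arm64"}},
	}}
	dt, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// Stored by the frontend as result metadata under exptypes.ExporterPlatformsKey.
	fmt.Println(exptypes.ExporterPlatformsKey, string(dt))
}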

View file

@ -12,5 +12,11 @@ type Exporter interface {
type ExporterInstance interface {
Name() string
Export(context.Context, cache.ImmutableRef, map[string][]byte) (map[string]string, error)
Export(context.Context, Source) (map[string]string, error)
}
type Source struct {
Ref cache.ImmutableRef
Refs map[string]cache.ImmutableRef
Metadata map[string][]byte
}
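A compile-only sketch of how an exporter can branch on the new Source shape, using a hypothetical helper (pickRef) and assuming Refs is keyed by the platform IDs published via exptypes above:

package exporterexample

import (
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/exporter"
	"github.com/pkg/errors"
)

// pickRef returns the single ref for single-platform results, or the ref
// registered under platformID when the frontend returned a map of refs.
func pickRef(inp exporter.Source, platformID string) (cache.ImmutableRef, error) {
	if len(inp.Refs) == 0 {
		return inp.Ref, nil
	}
	r, ok := inp.Refs[platformID]
	if !ok {
		return nil, errors.Errorf("no ref for platform %s", platformID)
	}
	return r, nil
}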

View file

@ -5,14 +5,18 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/builder/dockerignore"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@ -24,13 +28,14 @@ const (
keyTarget = "target"
keyFilename = "filename"
keyCacheFrom = "cache-from"
exporterImageConfig = "containerimage.config"
defaultDockerfileName = "Dockerfile"
dockerignoreFilename = ".dockerignore"
buildArgPrefix = "build-arg:"
labelPrefix = "label:"
keyNoCache = "no-cache"
keyTargetPlatform = "platform"
keyMultiPlatform = "multi-platform"
keyImageResolveMode = "image-resolve-mode"
)
var httpPrefix = regexp.MustCompile("^https?://")
@ -45,15 +50,20 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
}
buildPlatforms := []specs.Platform{defaultBuildPlatform}
targetPlatform := platforms.DefaultSpec()
targetPlatforms := []*specs.Platform{nil}
if v := opts[keyTargetPlatform]; v != "" {
var err error
targetPlatform, err = platforms.Parse(v)
targetPlatforms, err = parsePlatforms(v)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse target platform %s", v)
return nil, err
}
}
resolveMode, err := parseResolveMode(opts[keyImageResolveMode])
if err != nil {
return nil, err
}
filename := opts[keyFilename]
if filename == "" {
filename = defaultDockerfileName
@ -68,10 +78,16 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
}
}
name := "load Dockerfile"
if filename != "Dockerfile" {
name += " from " + filename
}
src := llb.Local(LocalNameDockerfile,
llb.IncludePatterns([]string{filename}),
llb.SessionID(c.BuildOpts().SessionID),
llb.SharedKeyHint(defaultDockerfileName),
dockerfile2llb.WithInternalName(name),
)
var buildContext *llb.State
isScratchContext := false
@ -79,7 +95,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
src = *st
buildContext = &src
} else if httpPrefix.MatchString(opts[LocalNameContext]) {
httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"))
httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context"))
def, err := httpContext.Marshal()
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal httpcontext")
@ -106,8 +122,8 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return nil, errors.Errorf("failed to read downloaded context")
}
if isArchive(dt) {
unpack := llb.Image(dockerfile2llb.CopyImage).
Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS())
unpack := llb.Image(dockerfile2llb.CopyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
unpack.AddMount("/src", httpContext, llb.Readonly)
src = unpack.AddMount("/out", llb.Scratch())
buildContext = &src
@ -156,6 +172,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
llb.SessionID(c.BuildOpts().SessionID),
llb.IncludePatterns([]string{dockerignoreFilename}),
llb.SharedKeyHint(dockerignoreFilename),
dockerfile2llb.WithInternalName("load "+dockerignoreFilename),
)
dockerignoreState = &st
}
@ -197,47 +214,109 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
}
}
st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
Target: opts[keyTarget],
MetaResolver: c,
BuildArgs: filter(opts, buildArgPrefix),
Labels: filter(opts, labelPrefix),
SessionID: c.BuildOpts().SessionID,
BuildContext: buildContext,
Excludes: excludes,
IgnoreCache: ignoreCache,
TargetPlatform: &targetPlatform,
BuildPlatforms: buildPlatforms,
})
exportMap := len(targetPlatforms) > 1
if err != nil {
return nil, errors.Wrapf(err, "failed to create LLB definition")
if v := opts[keyMultiPlatform]; v != "" {
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Errorf("invalid boolean value %s", v)
}
if !b && exportMap {
return nil, errors.Errorf("returning multiple target plaforms is not allowed")
}
exportMap = b
}
def, err = st.Marshal()
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal LLB definition")
expPlatforms := &exptypes.Platforms{
Platforms: make([]exptypes.Platform, len(targetPlatforms)),
}
res := client.NewResult()
eg, ctx = errgroup.WithContext(ctx)
for i, tp := range targetPlatforms {
func(i int, tp *specs.Platform) {
eg.Go(func() error {
st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
Target: opts[keyTarget],
MetaResolver: c,
BuildArgs: filter(opts, buildArgPrefix),
Labels: filter(opts, labelPrefix),
SessionID: c.BuildOpts().SessionID,
BuildContext: buildContext,
Excludes: excludes,
IgnoreCache: ignoreCache,
TargetPlatform: tp,
BuildPlatforms: buildPlatforms,
ImageResolveMode: resolveMode,
PrefixPlatform: exportMap,
})
if err != nil {
return errors.Wrapf(err, "failed to create LLB definition")
}
def, err := st.Marshal()
if err != nil {
return errors.Wrapf(err, "failed to marshal LLB definition")
}
config, err := json.Marshal(img)
if err != nil {
return errors.Wrapf(err, "failed to marshal image config")
}
var cacheFrom []string
if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" {
cacheFrom = strings.Split(cacheFromStr, ",")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
ImportCacheRefs: cacheFrom,
})
if err != nil {
return err
}
ref, err := r.SingleRef()
if err != nil {
return err
}
if !exportMap {
res.AddMeta(exptypes.ExporterImageConfigKey, config)
res.SetRef(ref)
} else {
p := platforms.DefaultSpec()
if tp != nil {
p = *tp
}
k := platforms.Format(p)
res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config)
res.AddRef(k, ref)
expPlatforms.Platforms[i] = exptypes.Platform{
ID: k,
Platform: p,
}
}
return nil
})
}(i, tp)
}
config, err := json.Marshal(img)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal image config")
}
var cacheFrom []string
if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" {
cacheFrom = strings.Split(cacheFromStr, ",")
}
res, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
ImportCacheRefs: cacheFrom,
})
if err != nil {
if err := eg.Wait(); err != nil {
return nil, err
}
res.AddMeta(exporterImageConfig, config)
if exportMap {
dt, err := json.Marshal(expPlatforms)
if err != nil {
return nil, err
}
res.AddMeta(exptypes.ExporterPlatformsKey, dt)
}
return res, nil
}
@ -286,7 +365,7 @@ func detectGitContext(ref string) (*llb.State, bool) {
if len(parts) > 1 {
branch = parts[1]
}
st := llb.Git(parts[0], branch)
st := llb.Git(parts[0], branch, dockerfile2llb.WithInternalName("load git source "+ref))
return &st, true
}
@ -308,3 +387,29 @@ func isArchive(header []byte) bool {
_, err := r.Next()
return err == nil
}
func parsePlatforms(v string) ([]*specs.Platform, error) {
var pp []*specs.Platform
for _, v := range strings.Split(v, ",") {
p, err := platforms.Parse(v)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse target platform %s", v)
}
p = platforms.Normalize(p)
pp = append(pp, &p)
}
return pp, nil
}
func parseResolveMode(v string) (llb.ResolveMode, error) {
switch v {
case pb.AttrImageResolveModeDefault, "":
return llb.ResolveModeDefault, nil
case pb.AttrImageResolveModeForcePull:
return llb.ResolveModeForcePull, nil
case pb.AttrImageResolveModePreferLocal:
return llb.ResolveModePreferLocal, nil
default:
return 0, errors.Errorf("invalid image-resolve-mode: %s", v)
}
}
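For reference, the "platform" option accepts a comma-separated list; a small standalone example of the parse/normalize round trip it relies on (same containerd platforms helpers, sample values only):

package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/platforms"
)

func main() {
	for _, s := range strings.Split("linux/amd64,linux/arm64,windows/amd64", ",") {
		p, err := platforms.Parse(s)
		if err != nil {
			panic(err)
		}
		p = platforms.Normalize(p)
		fmt.Println(platforms.Format(p))
	}
}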

View file

@ -21,6 +21,7 @@ import (
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
gw "github.com/moby/buildkit/frontend/gateway/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@ -47,8 +48,10 @@ type ConvertOpt struct {
IgnoreCache []string
// CacheIDNamespace scopes the IDs for different cache mounts
CacheIDNamespace string
ImageResolveMode llb.ResolveMode
TargetPlatform *specs.Platform
BuildPlatforms []specs.Platform
PrefixPlatform bool
}
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) {
@ -56,16 +59,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
return nil, nil, errors.Errorf("the Dockerfile cannot be empty")
}
if opt.TargetPlatform != nil && opt.BuildPlatforms == nil {
opt.BuildPlatforms = []specs.Platform{*opt.TargetPlatform}
}
if len(opt.BuildPlatforms) == 0 {
opt.BuildPlatforms = []specs.Platform{platforms.DefaultSpec()}
}
implicitTargetPlatform := false
if opt.TargetPlatform == nil {
implicitTargetPlatform = true
opt.TargetPlatform = &opt.BuildPlatforms[0]
platformOpt := buildPlatformOpt(&opt)
optMetaArgs := getPlatformArgs(platformOpt)
for i, arg := range optMetaArgs {
optMetaArgs[i] = setKVValue(arg, opt.BuildArgs)
}
dockerfile, err := parser.Parse(bytes.NewReader(dt))
@ -80,7 +78,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
return nil, nil, err
}
optMetaArgs := []instructions.KeyValuePairOptional{}
for _, metaArg := range metaArgs {
optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs))
}
@ -95,8 +92,8 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
allDispatchStates := newDispatchStates()
// set base state for every image
for _, st := range stages {
name, err := shlex.ProcessWord(st.BaseName, toEnvList(optMetaArgs, nil))
for i, st := range stages {
name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs))
if err != nil {
return nil, nil, err
}
@ -106,13 +103,19 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
st.BaseName = name
ds := &dispatchState{
stage: st,
deps: make(map[*dispatchState]struct{}),
ctxPaths: make(map[string]struct{}),
stage: st,
deps: make(map[*dispatchState]struct{}),
ctxPaths: make(map[string]struct{}),
stageName: st.Name,
prefixPlatform: opt.PrefixPlatform,
}
if st.Name == "" {
ds.stageName = fmt.Sprintf("stage-%d", i)
}
if v := st.Platform; v != "" {
v, err := shlex.ProcessWord(v, toEnvList(optMetaArgs, nil))
v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs))
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
}
@ -124,6 +127,15 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
ds.platform = &p
}
total := 1
for _, cmd := range ds.stage.Commands {
switch cmd.(type) {
case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand:
total++
}
}
ds.cmdTotal = total
allDispatchStates.addState(ds)
if opt.IgnoreCache != nil {
if len(opt.IgnoreCache) == 0 {
@ -138,6 +150,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
}
if len(allDispatchStates.states) == 1 {
allDispatchStates.states[0].stageName = ""
}
var target *dispatchState
if opt.Target == "" {
target = allDispatchStates.lastTarget()
@ -176,7 +192,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
if d.base == nil {
if d.stage.BaseName == emptyImageName {
d.state = llb.Scratch()
d.image = emptyImage(*opt.TargetPlatform)
d.image = emptyImage(platformOpt.targetPlatform)
continue
}
func(i int, d *dispatchState) {
@ -187,12 +203,16 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
platform := d.platform
if platform == nil {
platform = opt.TargetPlatform
platform = &platformOpt.targetPlatform
}
d.stage.BaseName = reference.TagNameOnly(ref).String()
var isScratch bool
if metaResolver != nil && reachable {
dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, platform)
if metaResolver != nil && reachable && !d.unregistered {
dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, gw.ResolveImageConfigOpt{
Platform: platform,
ResolveMode: opt.ImageResolveMode.String(),
LogName: fmt.Sprintf("[internal] load metadata for %s", d.stage.BaseName),
})
if err == nil { // handle the error while builder is actually running
var img Image
if err := json.Unmarshal(dt, &img); err != nil {
@ -200,8 +220,8 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
img.Created = nil
// if there is no explicit target platform, try to match based on image config
if d.platform == nil && implicitTargetPlatform {
p := autoDetectPlatform(img, *platform, opt.BuildPlatforms)
if d.platform == nil && platformOpt.implicitTarget {
p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms)
platform = &p
}
d.image = img
@ -221,8 +241,9 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
if isScratch {
d.state = llb.Scratch()
} else {
d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform))
d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)))
}
d.platform = platform
return nil
})
}(i, d)
@ -242,19 +263,14 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
if d.base != nil {
d.state = d.base.state
d.platform = d.base.platform
d.image = clone(d.base.image)
}
// initialize base metadata from image conf
for _, env := range d.image.Config.Env {
parts := strings.SplitN(env, "=", 2)
v := ""
if len(parts) > 1 {
v = parts[1]
}
if err := dispatchEnv(d, &instructions.EnvCommand{Env: []instructions.KeyValuePair{{Key: parts[0], Value: v}}}, false); err != nil {
return nil, nil, err
}
k, v := parseKeyValue(env)
d.state = d.state.AddEnv(k, v)
}
if d.image.Config.WorkingDir != "" {
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil {
@ -276,8 +292,8 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
buildContext: llb.NewState(buildContext),
proxyEnv: proxyEnv,
cacheIDNamespace: opt.CacheIDNamespace,
buildPlatforms: opt.BuildPlatforms,
targetPlatform: *opt.TargetPlatform,
buildPlatforms: platformOpt.buildPlatforms,
targetPlatform: platformOpt.targetPlatform,
}
if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil {
@ -306,26 +322,38 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
llb.SessionID(opt.SessionID),
llb.ExcludePatterns(opt.Excludes),
llb.SharedKeyHint(localNameContext),
WithInternalName("load build context"),
}
if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil {
opts = append(opts, llb.FollowPaths(includePatterns))
}
bc := llb.Local(localNameContext, opts...)
if opt.BuildContext != nil {
bc = *opt.BuildContext
}
buildContext.Output = bc.Output()
st := target.state.SetMarhalDefaults(llb.Platform(*opt.TargetPlatform))
st := target.state.SetMarhalDefaults(llb.Platform(platformOpt.targetPlatform))
if !implicitTargetPlatform {
target.image.OS = opt.TargetPlatform.OS
target.image.Architecture = opt.TargetPlatform.Architecture
if !platformOpt.implicitTarget {
target.image.OS = platformOpt.targetPlatform.OS
target.image.Architecture = platformOpt.targetPlatform.Architecture
}
return &st, &target.image, nil
}
func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string {
m := map[string]string{}
for _, arg := range metaArgs {
m[arg.Key] = arg.ValueString()
}
return m
}
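metaArgsToMap is what lets FROM ${BASE} and per-stage platform strings be expanded through shlex.ProcessWordWithMap above. A usage sketch, assuming the vendored shell package at this revision and an illustrative BASE value:

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func main() {
	lex := shell.NewLex('\\')
	meta := map[string]string{"BASE": "alpine:3.8", "TARGETARCH": "arm64"}
	name, err := lex.ProcessWordWithMap("${BASE}", meta)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // alpine:3.8
}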
func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) {
cmd := command{Command: ic}
if c, ok := ic.(*instructions.CopyCommand); ok {
@ -374,7 +402,7 @@ type dispatchOpt struct {
func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok {
err := ex.Expand(func(word string) (string, error) {
return opt.shlex.ProcessWord(word, toEnvList(d.buildArgs, d.image.Config.Env))
return opt.shlex.ProcessWordWithMap(word, toEnvMap(d.buildArgs, d.image.Config.Env))
})
if err != nil {
return err
@ -386,7 +414,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
case *instructions.MaintainerCommand:
err = dispatchMaintainer(d, c)
case *instructions.EnvCommand:
err = dispatchEnv(d, c, true)
err = dispatchEnv(d, c)
case *instructions.RunCommand:
err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt)
case *instructions.WorkdirCommand:
@ -437,18 +465,22 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
}
type dispatchState struct {
state llb.State
image Image
platform *specs.Platform
stage instructions.Stage
base *dispatchState
deps map[*dispatchState]struct{}
buildArgs []instructions.KeyValuePairOptional
commands []command
ctxPaths map[string]struct{}
ignoreCache bool
cmdSet bool
unregistered bool
state llb.State
image Image
platform *specs.Platform
stage instructions.Stage
base *dispatchState
deps map[*dispatchState]struct{}
buildArgs []instructions.KeyValuePairOptional
commands []command
ctxPaths map[string]struct{}
ignoreCache bool
cmdSet bool
unregistered bool
stageName string
cmdIndex int
cmdTotal int
prefixPlatform bool
}
type dispatchStates struct {
@ -517,17 +549,14 @@ func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error
return nil
}
func dispatchEnv(d *dispatchState, c *instructions.EnvCommand, commit bool) error {
func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error {
commitMessage := bytes.NewBufferString("ENV")
for _, e := range c.Env {
commitMessage.WriteString(" " + e.String())
d.state = d.state.AddEnv(e.Key, e.Value)
d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value, true)
d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value)
}
if commit {
return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
return nil
return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
@ -554,7 +583,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
return err
}
opt = append(opt, runMounts...)
opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(dopt.shlex, c.String(), d.state.Run(opt...).Env())), d.prefixPlatform, d.state.GetPlatform())))
d.state = d.state.Run(opt...).Root()
return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
}
@ -572,9 +601,9 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
return nil
}
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint interface{}, chown string, opt dispatchOpt) error {
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
// TODO: this should use CopyOp instead. Current implementation is inefficient
img := llb.Image(CopyImage, llb.Platform(opt.buildPlatforms[0]))
img := llb.Image(CopyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest()))
if c.Dest() == "." || c.Dest()[len(c.Dest())-1] == filepath.Separator {
@ -644,13 +673,17 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...)
}
runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint)}
runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, d.state.GetPlatform()))}
if d.ignoreCache {
runOpt = append(runOpt, llb.IgnoreCache)
}
run := img.Run(append(runOpt, mounts...)...)
d.state = run.AddMount("/dest", d.state).Platform(opt.targetPlatform)
if d.platform != nil {
d.state = d.state.Platform(*d.platform)
}
return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
}
@ -713,7 +746,7 @@ func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) e
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
ports := []string{}
for _, p := range c.Ports {
ps, err := shlex.ProcessWords(p, toEnvList(d.buildArgs, d.image.Config.Env))
ps, err := shlex.ProcessWordsWithMap(p, toEnvMap(d.buildArgs, d.image.Config.Env))
if err != nil {
return err
}
@ -818,15 +851,12 @@ func splitWildcards(name string) (string, string) {
return path.Dir(name[:i]), base + name[i:]
}
func addEnv(env []string, k, v string, override bool) []string {
func addEnv(env []string, k, v string) []string {
gotOne := false
for i, envVar := range env {
envParts := strings.SplitN(envVar, "=", 2)
compareFrom := envParts[0]
if shell.EqualEnvKeys(compareFrom, k) {
if override {
env[i] = k + "=" + v
}
key, _ := parseKeyValue(envVar)
if shell.EqualEnvKeys(key, k) {
env[i] = k + "=" + v
gotOne = true
break
}
@ -837,6 +867,16 @@ func addEnv(env []string, k, v string, override bool) []string {
return env
}
func parseKeyValue(env string) (string, string) {
parts := strings.SplitN(env, "=", 2)
v := ""
if len(parts) > 1 {
v = parts[1]
}
return parts[0], v
}
func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional {
if v, ok := values[kvpo.Key]; ok {
kvpo.Value = &v
@ -844,11 +884,17 @@ func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string
return kvpo
}
func toEnvList(args []instructions.KeyValuePairOptional, env []string) []string {
func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string {
m := shell.BuildEnvs(env)
for _, arg := range args {
env = addEnv(env, arg.Key, arg.ValueString(), false)
// If key already exists, keep previous value.
if _, ok := m[arg.Key]; ok {
continue
}
m[arg.Key] = arg.ValueString()
}
return env
return m
}
func dfCmd(cmd interface{}) llb.ConstraintsOpt {
@ -1028,3 +1074,37 @@ func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Plat
}
return target
}
func WithInternalName(name string, a ...interface{}) llb.ConstraintsOpt {
return llb.WithCustomName("[internal] "+name, a...)
}
func uppercaseCmd(str string) string {
p := strings.SplitN(str, " ", 2)
p[0] = strings.ToUpper(p[0])
return strings.Join(p, " ")
}
func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string {
w, err := shlex.ProcessWord(cmd, env)
if err != nil {
return cmd
}
return w
}
func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string {
if ds.cmdTotal == 0 {
return str
}
out := "["
if prefixPlatform && platform != nil {
out += platforms.Format(*platform) + " "
}
if ds.stageName != "" {
out += ds.stageName + " "
}
ds.cmdIndex++
out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal)
return out + str
}

View file

@ -0,0 +1,58 @@
package dockerfile2llb
import (
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
type platformOpt struct {
targetPlatform specs.Platform
buildPlatforms []specs.Platform
implicitTarget bool
}
func buildPlatformOpt(opt *ConvertOpt) *platformOpt {
buildPlatforms := opt.BuildPlatforms
targetPlatform := opt.TargetPlatform
implicitTargetPlatform := false
if opt.TargetPlatform != nil && opt.BuildPlatforms == nil {
buildPlatforms = []specs.Platform{*opt.TargetPlatform}
}
if len(buildPlatforms) == 0 {
buildPlatforms = []specs.Platform{platforms.DefaultSpec()}
}
if opt.TargetPlatform == nil {
implicitTargetPlatform = true
targetPlatform = &buildPlatforms[0]
}
return &platformOpt{
targetPlatform: *targetPlatform,
buildPlatforms: buildPlatforms,
implicitTarget: implicitTargetPlatform,
}
}
func getPlatformArgs(po *platformOpt) []instructions.KeyValuePairOptional {
bp := po.buildPlatforms[0]
tp := po.targetPlatform
m := map[string]string{
"BUILDPLATFORM": platforms.Format(bp),
"BUILDOS": bp.OS,
"BUILDARCH": bp.Architecture,
"BUILDVARIANT": bp.Variant,
"TARGETPLATFORM": platforms.Format(tp),
"TARGETOS": tp.OS,
"TARGETARCH": tp.Architecture,
"TARGETVARIANT": tp.Variant,
}
opts := make([]instructions.KeyValuePairOptional, 0, len(m))
for k, v := range m {
s := v
opts = append(opts, instructions.KeyValuePairOptional{Key: k, Value: &s})
}
return opts
}
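Concretely, for a linux/amd64 build host targeting linux/arm64 the implicit ARGs above would carry values like the ones printed by this standalone illustration (it mirrors the map in getPlatformArgs; the platform strings are examples):

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	bp, _ := platforms.Parse("linux/amd64") // build platform
	tp, _ := platforms.Parse("linux/arm64") // target platform
	for _, kv := range [][2]string{
		{"BUILDPLATFORM", platforms.Format(bp)},
		{"BUILDOS", bp.OS},
		{"BUILDARCH", bp.Architecture},
		{"TARGETPLATFORM", platforms.Format(tp)},
		{"TARGETOS", tp.OS},
		{"TARGETARCH", tp.Architecture},
	} {
		fmt.Printf("ARG %s=%s\n", kv[0], kv[1])
	}
}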

View file

@ -139,7 +139,8 @@ func (e *parseError) Error() string {
return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
}
// Parse a docker file into a collection of buildable stages
// Parse a Dockerfile into a collection of buildable stages.
// metaArgs is a collection of ARG instructions that occur before the first FROM.
func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) {
for _, n := range ast.Children {
cmd, err := ParseInstruction(n)

View file

@ -28,7 +28,7 @@ func NewLex(escapeToken rune) *Lex {
// ProcessWord will use the 'env' list of environment variables,
// and replace any env var references in 'word'.
func (s *Lex) ProcessWord(word string, env []string) (string, error) {
word, _, err := s.process(word, env)
word, _, err := s.process(word, BuildEnvs(env))
return word, err
}
@ -40,11 +40,23 @@ func (s *Lex) ProcessWord(word string, env []string) (string, error) {
// Note, each one is trimmed to remove leading and trailing spaces (unless
// they are quoted", but ProcessWord retains spaces between words.
func (s *Lex) ProcessWords(word string, env []string) ([]string, error) {
_, words, err := s.process(word, BuildEnvs(env))
return words, err
}
// ProcessWordWithMap will use the 'env' map of environment variables,
// and replace any env var references in 'word'.
func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) {
word, _, err := s.process(word, env)
return word, err
}
func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) {
_, words, err := s.process(word, env)
return words, err
}
func (s *Lex) process(word string, env []string) (string, []string, error) {
func (s *Lex) process(word string, env map[string]string) (string, []string, error) {
sw := &shellWord{
envs: env,
escapeToken: s.escapeToken,
@ -55,7 +67,7 @@ func (s *Lex) process(word string, env []string) (string, []string, error) {
type shellWord struct {
scanner scanner.Scanner
envs []string
envs map[string]string
escapeToken rune
}
@ -353,21 +365,33 @@ func isSpecialParam(char rune) bool {
}
func (sw *shellWord) getEnv(name string) string {
for _, env := range sw.envs {
i := strings.Index(env, "=")
if i < 0 {
if EqualEnvKeys(name, env) {
// Should probably never get here, but just in case treat
// it like "var" and "var=" are the same
return ""
}
continue
for key, value := range sw.envs {
if EqualEnvKeys(name, key) {
return value
}
compareName := env[:i]
if !EqualEnvKeys(name, compareName) {
continue
}
return env[i+1:]
}
return ""
}
func BuildEnvs(env []string) map[string]string {
envs := map[string]string{}
for _, e := range env {
i := strings.Index(e, "=")
if i < 0 {
envs[e] = ""
} else {
k := e[:i]
v := e[i+1:]
// If key already exists, keep previous value.
if _, ok := envs[k]; ok {
continue
}
envs[k] = v
}
}
return envs
}
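A short usage sketch of BuildEnvs, assuming the vendored shell package at this revision; the first occurrence of a key wins, matching the comment in the loop:

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func main() {
	envs := shell.BuildEnvs([]string{"PATH=/usr/bin", "PATH=/override", "EMPTY"})
	fmt.Println(envs["PATH"])        // /usr/bin (first value wins)
	fmt.Println(envs["EMPTY"] == "") // true: "EMPTY" without '=' maps to ""
}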

View file

@ -7,9 +7,8 @@ import (
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
gatewayclient "github.com/moby/buildkit/frontend/gateway/client"
gw "github.com/moby/buildkit/frontend/gateway/client"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
type Frontend interface {
@ -18,11 +17,11 @@ type Frontend interface {
type FrontendLLBBridge interface {
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
}
type SolveRequest = gatewayclient.SolveRequest
type SolveRequest = gw.SolveRequest
type WorkerInfos interface {
WorkerInfos() []client.WorkerInfo

View file

@ -10,7 +10,7 @@ import (
type Client interface {
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
BuildOpts() BuildOpts
}
@ -50,3 +50,9 @@ type BuildOpts struct {
Workers []WorkerInfo
Product string
}
type ResolveImageConfigOpt struct {
Platform *specs.Platform
ResolveMode string
LogName string
}
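A compile-only sketch of a frontend calling the extended method; the image reference, log text and the "default" resolve-mode string (mirroring pb.AttrImageResolveModeDefault) are illustrative:

package frontendexample

import (
	"context"

	"github.com/containerd/containerd/platforms"
	gw "github.com/moby/buildkit/frontend/gateway/client"
)

// resolveBase fetches the image config for a base image on the default platform.
func resolveBase(ctx context.Context, c gw.Client) ([]byte, error) {
	p := platforms.DefaultSpec()
	_, dt, err := c.ResolveImageConfig(ctx, "docker.io/library/alpine:latest", gw.ResolveImageConfigOpt{
		Platform:    &p,
		ResolveMode: "default",
		LogName:     "[internal] load metadata for docker.io/library/alpine:latest",
	})
	return dt, err
}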

View file

@ -14,9 +14,12 @@ import (
"github.com/docker/distribution/reference"
apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend"
gw "github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
@ -37,9 +40,8 @@ import (
)
const (
keySource = "source"
keyDevel = "gateway-devel"
exporterImageConfig = "containerimage.config"
keySource = "source"
keyDevel = "gateway-devel"
)
func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend {
@ -97,7 +99,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
return nil, errors.Errorf("invalid ref: %T", devRes.Ref.Sys())
}
rootFS = workerRef.ImmutableRef
config, ok := devRes.Metadata[exporterImageConfig]
config, ok := devRes.Metadata[exptypes.ExporterImageConfigKey]
if ok {
if err := json.Unmarshal(config, &img); err != nil {
return nil, err
@ -109,7 +111,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
return nil, err
}
dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), nil) // TODO:
dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), gw.ResolveImageConfigOpt{})
if err != nil {
return nil, err
}
@ -125,7 +127,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
}
}
src := llb.Image(sourceRef.String())
src := llb.Image(sourceRef.String(), &markTypeFrontend{})
def, err := src.Marshal()
if err != nil {
@ -322,7 +324,11 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R
OSFeatures: p.OSFeatures,
}
}
dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, platform)
dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, gw.ResolveImageConfigOpt{
Platform: platform,
ResolveMode: req.ResolveMode,
LogName: req.LogName,
})
if err != nil {
return nil, err
}
@ -453,7 +459,7 @@ func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongR
return &pb.PongResponse{
FrontendAPICaps: pb.Caps.All(),
Workers: pbWorkers,
// TODO: add LLB info
LLBCaps: opspb.Caps.All(),
}, nil
}
@ -511,3 +517,9 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
logrus.Debugf("serving grpc connection")
(&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer})
}
type markTypeFrontend struct{}
func (*markTypeFrontend) SetImageOption(ii *llb.ImageInfo) {
ii.RecordType = string(client.UsageRecordTypeFrontend)
}
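markTypeFrontend works because any type with SetImageOption satisfies llb.ImageOption. A compile-only sketch of the same pattern with an illustrative option name:

package gatewayexample

import (
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

type markFrontend struct{}

// SetImageOption tags the image source so its cache records are categorized
// as frontend usage, as in the gateway change above.
func (*markFrontend) SetImageOption(ii *llb.ImageInfo) {
	ii.RecordType = string(client.UsageRecordTypeFrontend)
}

// The compiler enforces that the option satisfies llb.ImageOption.
var _ llb.ImageOption = (*markFrontend)(nil)

func frontendImage(ref string) llb.State {
	return llb.Image(ref, &markFrontend{})
}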

View file

@ -9,12 +9,13 @@ var Caps apicaps.CapList
// considered immutable. After a capability is marked stable it should not be disabled.
const (
CapSolveBase apicaps.CapID = "solve.base"
CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn"
CapResolveImage apicaps.CapID = "resolveimage"
CapReadFile apicaps.CapID = "readfile"
CapReturnResult apicaps.CapID = "return"
CapReturnMap apicaps.CapID = "returnmap"
CapSolveBase apicaps.CapID = "solve.base"
CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn"
CapResolveImage apicaps.CapID = "resolveimage"
CapResolveImageResolveMode apicaps.CapID = "resolveimage.resolvemode"
CapReadFile apicaps.CapID = "readfile"
CapReturnResult apicaps.CapID = "return"
CapReturnMap apicaps.CapID = "returnmap"
)
func init() {
@ -40,6 +41,13 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapResolveImageResolveMode,
Name: "resolve remote image config with custom resolvemode",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapReadFile,
Name: "read static file",

View file

@ -227,8 +227,10 @@ func (*ReturnResponse) ProtoMessage() {}
func (*ReturnResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{3} }
type ResolveImageConfigRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform" json:"Platform,omitempty"`
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform" json:"Platform,omitempty"`
ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"`
LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"`
}
func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} }
@ -250,6 +252,20 @@ func (m *ResolveImageConfigRequest) GetPlatform() *pb.Platform {
return nil
}
func (m *ResolveImageConfigRequest) GetResolveMode() string {
if m != nil {
return m.ResolveMode
}
return ""
}
func (m *ResolveImageConfigRequest) GetLogName() string {
if m != nil {
return m.LogName
}
return ""
}
type ResolveImageConfigResponse struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"`
Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"`
@ -890,6 +906,18 @@ func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) {
}
i += n5
}
if len(m.ResolveMode) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode)))
i += copy(dAtA[i:], m.ResolveMode)
}
if len(m.LogName) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName)))
i += copy(dAtA[i:], m.LogName)
}
return i, nil
}
@ -1304,6 +1332,14 @@ func (m *ResolveImageConfigRequest) Size() (n int) {
l = m.Platform.Size()
n += 1 + l + sovGateway(uint64(l))
}
l = len(m.ResolveMode)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
l = len(m.LogName)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
return n
}
@ -2111,6 +2147,64 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResolveMode = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LogName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
@ -3295,66 +3389,68 @@ var (
func init() { proto.RegisterFile("gateway.proto", fileDescriptorGateway) }
var fileDescriptorGateway = []byte{
// 969 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x4b, 0x6f, 0xdb, 0x46,
0x10, 0x0e, 0x4d, 0x49, 0x91, 0x46, 0x52, 0xac, 0x2e, 0x8a, 0x42, 0xe1, 0xc1, 0x51, 0x89, 0x22,
0x65, 0xf3, 0x20, 0x51, 0xa5, 0x45, 0xd2, 0x04, 0x48, 0x1b, 0xd9, 0x31, 0xe2, 0x56, 0x41, 0x84,
0xcd, 0xc1, 0x40, 0xd0, 0x1e, 0x56, 0xd2, 0x92, 0x26, 0x4c, 0x71, 0xd9, 0xe5, 0xca, 0xae, 0xd0,
0x4b, 0xdb, 0x53, 0x7e, 0x5a, 0x8e, 0x3d, 0xf7, 0x10, 0x14, 0xbe, 0xf5, 0x5f, 0x14, 0xfb, 0xa0,
0x4c, 0xbf, 0x64, 0xfb, 0xa4, 0x9d, 0xe5, 0x7c, 0x33, 0xdf, 0xce, 0x7c, 0xb3, 0x2b, 0x68, 0x47,
0x44, 0xd0, 0x43, 0xb2, 0xf0, 0x33, 0xce, 0x04, 0x43, 0xb7, 0x67, 0x6c, 0xbc, 0xf0, 0xc7, 0xf3,
0x38, 0x99, 0xee, 0xc7, 0xc2, 0x3f, 0xf8, 0xda, 0x0f, 0x39, 0x4b, 0x05, 0x4d, 0xa7, 0xce, 0xc3,
0x28, 0x16, 0x7b, 0xf3, 0xb1, 0x3f, 0x61, 0xb3, 0x20, 0x62, 0x11, 0x0b, 0x14, 0x62, 0x3c, 0x0f,
0x95, 0xa5, 0x0c, 0xb5, 0xd2, 0x91, 0x9c, 0xfe, 0x69, 0xf7, 0x88, 0xb1, 0x28, 0xa1, 0x24, 0x8b,
0x73, 0xb3, 0x0c, 0x78, 0x36, 0x09, 0x72, 0x41, 0xc4, 0x3c, 0x37, 0x98, 0x07, 0x25, 0x8c, 0x24,
0x12, 0x14, 0x44, 0x82, 0x9c, 0x25, 0x07, 0x94, 0x07, 0xd9, 0x38, 0x60, 0x59, 0xe1, 0x1d, 0x5c,
0xe8, 0x4d, 0xb2, 0x38, 0x10, 0x8b, 0x8c, 0xe6, 0xc1, 0x21, 0xe3, 0xfb, 0x94, 0x1b, 0xc0, 0xa3,
0x0b, 0x01, 0x73, 0x11, 0x27, 0x12, 0x35, 0x21, 0x59, 0x2e, 0x93, 0xc8, 0x5f, 0x0d, 0x72, 0xff,
0xb3, 0xa0, 0x86, 0x69, 0x3e, 0x4f, 0x04, 0x42, 0x60, 0x73, 0x1a, 0x76, 0xad, 0x9e, 0xe5, 0x35,
0x5e, 0xdd, 0xc0, 0xd2, 0x40, 0x8f, 0xa1, 0xc2, 0x69, 0x98, 0x77, 0xd7, 0x7a, 0x96, 0xd7, 0xec,
0x7f, 0xee, 0x5f, 0x58, 0x3f, 0x1f, 0xd3, 0xf0, 0x35, 0xc9, 0x5e, 0xdd, 0xc0, 0x0a, 0x80, 0x7e,
0x82, 0xfa, 0x8c, 0x0a, 0x32, 0x25, 0x82, 0x74, 0xa1, 0x67, 0x7b, 0xcd, 0x7e, 0xb0, 0x12, 0x2c,
0x19, 0xf8, 0xaf, 0x0d, 0xe2, 0x65, 0x2a, 0xf8, 0x02, 0x2f, 0x03, 0x38, 0xcf, 0xa0, 0x7d, 0xe2,
0x13, 0xea, 0x80, 0xbd, 0x4f, 0x17, 0x9a, 0x2a, 0x96, 0x4b, 0xf4, 0x29, 0x54, 0x0f, 0x48, 0x32,
0xa7, 0x8a, 0x69, 0x0b, 0x6b, 0xe3, 0xe9, 0xda, 0x13, 0x6b, 0x50, 0x87, 0x1a, 0x57, 0xe1, 0xdd,
0xbf, 0xd4, 0x59, 0x25, 0x4d, 0xf4, 0xbd, 0x39, 0x97, 0xa5, 0xa8, 0xdd, 0xbf, 0xf4, 0x5c, 0xf2,
0x27, 0xd7, 0xb4, 0x14, 0xd0, 0x79, 0x0c, 0x8d, 0xe5, 0xd6, 0x65, 0x74, 0x1a, 0x25, 0x3a, 0xae,
0x80, 0x36, 0xa6, 0x62, 0xce, 0x53, 0x4c, 0x7f, 0x9d, 0xd3, 0x5c, 0xa0, 0xef, 0x0a, 0x7e, 0x0a,
0x7f, 0x59, 0x91, 0xa5, 0x23, 0x36, 0x00, 0xe4, 0x41, 0x95, 0x72, 0xce, 0xb8, 0x69, 0x0f, 0xf2,
0xb5, 0xf2, 0x7c, 0x9e, 0x4d, 0xfc, 0xb7, 0x4a, 0x79, 0x58, 0x3b, 0xb8, 0x1d, 0xb8, 0x55, 0x64,
0xcd, 0x33, 0x96, 0xe6, 0xd4, 0xdd, 0x85, 0xdb, 0x98, 0x2a, 0xdd, 0xed, 0xcc, 0x48, 0x44, 0x37,
0x59, 0x1a, 0xc6, 0x51, 0xc1, 0xa9, 0x03, 0x36, 0x2e, 0xa4, 0x80, 0xe5, 0x12, 0x79, 0x50, 0x1f,
0x25, 0x44, 0x84, 0x8c, 0xcf, 0x4c, 0xb6, 0x96, 0x9f, 0x8d, 0xfd, 0x62, 0x0f, 0x2f, 0xbf, 0xba,
0x7f, 0x58, 0xe0, 0x9c, 0x17, 0x59, 0xe7, 0x45, 0x3f, 0x42, 0x6d, 0x2b, 0x8e, 0x68, 0xae, 0x8f,
0xdb, 0x18, 0xf4, 0x3f, 0x7c, 0xbc, 0x73, 0xe3, 0x9f, 0x8f, 0x77, 0xee, 0x95, 0xd4, 0xcb, 0x32,
0x9a, 0x4e, 0x58, 0x2a, 0x48, 0x9c, 0x52, 0x2e, 0xe7, 0xe9, 0xe1, 0x54, 0x41, 0x7c, 0x8d, 0xc4,
0x26, 0x02, 0xfa, 0x0c, 0x6a, 0x3a, 0xba, 0xe9, 0xba, 0xb1, 0xdc, 0xf7, 0x36, 0xb4, 0xde, 0x4a,
0x02, 0xc5, 0x79, 0x7c, 0x80, 0x2d, 0x1a, 0xc6, 0x69, 0x2c, 0x62, 0x96, 0x9a, 0x3a, 0xdf, 0x92,
0xfc, 0x8f, 0x77, 0x71, 0xc9, 0x03, 0x39, 0x50, 0xdf, 0x36, 0x35, 0x37, 0x1d, 0x5c, 0xda, 0xe8,
0x1d, 0x34, 0x8b, 0xf5, 0x9b, 0x4c, 0x74, 0x6d, 0xa5, 0xa0, 0x27, 0x2b, 0x9a, 0x56, 0x66, 0xe2,
0x97, 0xa0, 0x5a, 0x4e, 0xe5, 0x60, 0xc8, 0x83, 0xf5, 0x9d, 0x59, 0xc6, 0xb8, 0xd8, 0x24, 0x93,
0x3d, 0x2a, 0x05, 0xd6, 0xad, 0xf4, 0x6c, 0xaf, 0x81, 0x4f, 0x6f, 0xa3, 0x07, 0xf0, 0x09, 0x49,
0x12, 0x76, 0x68, 0x14, 0xa1, 0x7a, 0xdb, 0xad, 0xf6, 0x2c, 0xaf, 0x8e, 0xcf, 0x7e, 0x90, 0x72,
0xdc, 0x8e, 0x53, 0x92, 0x74, 0x41, 0x79, 0x68, 0x03, 0xb9, 0xd0, 0x7a, 0xf9, 0x9b, 0x0c, 0x4b,
0xf9, 0x0b, 0x21, 0x78, 0xb7, 0xa9, 0x8a, 0x78, 0x62, 0xcf, 0x79, 0x0e, 0x9d, 0xd3, 0x94, 0xaf,
0x25, 0xf7, 0x9f, 0xa1, 0x6d, 0xce, 0x6f, 0xfa, 0xdf, 0x29, 0xdd, 0x32, 0xfa, 0x8e, 0x39, 0x1e,
0x00, 0xfb, 0x9a, 0x03, 0xe0, 0xfe, 0x0e, 0xeb, 0x98, 0x92, 0xe9, 0x76, 0x9c, 0xd0, 0x8b, 0xa5,
0x2b, 0x9b, 0x19, 0x27, 0x74, 0x44, 0xc4, 0xde, 0xb2, 0x99, 0xc6, 0x46, 0x4f, 0xa1, 0x8a, 0x49,
0x1a, 0x51, 0x93, 0xfa, 0x8b, 0x15, 0xa9, 0x55, 0x12, 0xe9, 0x8b, 0x35, 0xc4, 0x7d, 0x06, 0x8d,
0xe5, 0x9e, 0x94, 0xe2, 0x9b, 0x30, 0xcc, 0xa9, 0x96, 0xb5, 0x8d, 0x8d, 0x25, 0xf7, 0x87, 0x34,
0x8d, 0x4c, 0x6a, 0x1b, 0x1b, 0xcb, 0xbd, 0x0b, 0x9d, 0x63, 0xe6, 0xa6, 0x34, 0x08, 0x2a, 0x5b,
0xf2, 0xbe, 0xb4, 0x54, 0x1f, 0xd4, 0xda, 0x6d, 0x43, 0x73, 0x14, 0xa7, 0xc5, 0x60, 0xba, 0x47,
0x16, 0xb4, 0x46, 0x2c, 0x3d, 0x1e, 0xa7, 0x11, 0xac, 0x17, 0xfd, 0x79, 0x31, 0xda, 0xd9, 0x24,
0x59, 0x71, 0xa7, 0xf5, 0xce, 0x1e, 0xc5, 0xbc, 0x00, 0xbe, 0x76, 0x1c, 0x54, 0xe4, 0xe4, 0xe1,
0xd3, 0x70, 0xf4, 0x03, 0xdc, 0x1c, 0x0e, 0x07, 0x2a, 0xd2, 0xda, 0xb5, 0x22, 0x15, 0x30, 0xf4,
0x1c, 0x6e, 0xee, 0xaa, 0x87, 0x29, 0x37, 0xd3, 0x71, 0x4e, 0x59, 0xd5, 0xfb, 0xe5, 0x6b, 0x37,
0x4c, 0x27, 0x8c, 0x4f, 0x71, 0x01, 0xea, 0xbf, 0xaf, 0x40, 0x63, 0x38, 0x1c, 0x0c, 0x78, 0x3c,
0x8d, 0x28, 0xfa, 0xd3, 0x02, 0x74, 0xf6, 0x3e, 0x41, 0xdf, 0xac, 0x56, 0xc9, 0xf9, 0x17, 0x9b,
0xf3, 0xed, 0x35, 0x51, 0xa6, 0xca, 0xef, 0xa0, 0xaa, 0x54, 0x8c, 0xbe, 0xbc, 0xe2, 0x9c, 0x3b,
0xde, 0xe5, 0x8e, 0x26, 0xf6, 0x04, 0xea, 0x85, 0x12, 0xd0, 0xbd, 0x95, 0xf4, 0x4e, 0x08, 0xdd,
0xb9, 0x7f, 0x25, 0x5f, 0x93, 0x64, 0x17, 0x2a, 0x52, 0x46, 0xe8, 0xee, 0x0a, 0x50, 0x49, 0x67,
0xce, 0xaa, 0x73, 0x9e, 0xd0, 0xdf, 0x2f, 0xf2, 0x49, 0x55, 0x77, 0x8c, 0xb7, 0x92, 0x4f, 0xe9,
0xc5, 0x73, 0xbe, 0xba, 0x82, 0xa7, 0x0e, 0x3f, 0x68, 0x7d, 0x38, 0xda, 0xb0, 0xfe, 0x3e, 0xda,
0xb0, 0xfe, 0x3d, 0xda, 0xb0, 0xc6, 0x35, 0xf5, 0x9f, 0xe5, 0xd1, 0xff, 0x01, 0x00, 0x00, 0xff,
0xff, 0xb8, 0xcb, 0x8c, 0xfa, 0xd6, 0x09, 0x00, 0x00,
// 999 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x6f, 0xdb, 0x36,
0x14, 0x8e, 0x22, 0xdb, 0xb1, 0x9f, 0xed, 0xc6, 0x23, 0x86, 0x41, 0xd5, 0x21, 0xf5, 0x84, 0xa1,
0xd3, 0xfa, 0x43, 0xc2, 0xdc, 0x0d, 0xed, 0x5a, 0xa0, 0x5b, 0x9d, 0x34, 0x68, 0x36, 0x67, 0x35,
0xd8, 0x43, 0x81, 0x62, 0x3b, 0xd0, 0x36, 0xa5, 0x08, 0x91, 0x45, 0x8d, 0xa2, 0x93, 0x19, 0xbb,
0x6c, 0x3b, 0xf5, 0xbe, 0x7f, 0xaa, 0xc7, 0x9d, 0x77, 0x08, 0x86, 0xdc, 0xf6, 0x5f, 0x0c, 0xa4,
0x28, 0x47, 0xf9, 0xe5, 0x24, 0x27, 0xf3, 0x51, 0xef, 0x7b, 0xef, 0xe3, 0x7b, 0xdf, 0x23, 0x0d,
0xed, 0x90, 0x08, 0x7a, 0x48, 0xe6, 0x5e, 0xca, 0x99, 0x60, 0xe8, 0xf6, 0x94, 0x8d, 0xe6, 0xde,
0x68, 0x16, 0xc5, 0x93, 0xfd, 0x48, 0x78, 0x07, 0x5f, 0x7a, 0x01, 0x67, 0x89, 0xa0, 0xc9, 0xc4,
0x7e, 0x18, 0x46, 0x62, 0x6f, 0x36, 0xf2, 0xc6, 0x6c, 0xea, 0x87, 0x2c, 0x64, 0xbe, 0x42, 0x8c,
0x66, 0x81, 0xb2, 0x94, 0xa1, 0x56, 0x79, 0x24, 0xbb, 0x77, 0xd6, 0x3d, 0x64, 0x2c, 0x8c, 0x29,
0x49, 0xa3, 0x4c, 0x2f, 0x7d, 0x9e, 0x8e, 0xfd, 0x4c, 0x10, 0x31, 0xcb, 0x34, 0xe6, 0x41, 0x09,
0x23, 0x89, 0xf8, 0x05, 0x11, 0x3f, 0x63, 0xf1, 0x01, 0xe5, 0x7e, 0x3a, 0xf2, 0x59, 0x5a, 0x78,
0xfb, 0x97, 0x7a, 0x93, 0x34, 0xf2, 0xc5, 0x3c, 0xa5, 0x99, 0x7f, 0xc8, 0xf8, 0x3e, 0xe5, 0x1a,
0xf0, 0xe8, 0x52, 0xc0, 0x4c, 0x44, 0xb1, 0x44, 0x8d, 0x49, 0x9a, 0xc9, 0x24, 0xf2, 0x37, 0x07,
0x39, 0xff, 0x19, 0x50, 0xc3, 0x34, 0x9b, 0xc5, 0x02, 0x21, 0x30, 0x39, 0x0d, 0x2c, 0xa3, 0x6b,
0xb8, 0x8d, 0x57, 0x2b, 0x58, 0x1a, 0xe8, 0x31, 0x54, 0x38, 0x0d, 0x32, 0x6b, 0xb5, 0x6b, 0xb8,
0xcd, 0xde, 0xa7, 0xde, 0xa5, 0xf5, 0xf3, 0x30, 0x0d, 0x76, 0x49, 0xfa, 0x6a, 0x05, 0x2b, 0x00,
0xfa, 0x01, 0xea, 0x53, 0x2a, 0xc8, 0x84, 0x08, 0x62, 0x41, 0xd7, 0x74, 0x9b, 0x3d, 0x7f, 0x29,
0x58, 0x32, 0xf0, 0x76, 0x35, 0xe2, 0x65, 0x22, 0xf8, 0x1c, 0x2f, 0x02, 0xd8, 0xcf, 0xa0, 0x7d,
0xea, 0x13, 0xea, 0x80, 0xb9, 0x4f, 0xe7, 0x39, 0x55, 0x2c, 0x97, 0xe8, 0x63, 0xa8, 0x1e, 0x90,
0x78, 0x46, 0x15, 0xd3, 0x16, 0xce, 0x8d, 0xa7, 0xab, 0x4f, 0x8c, 0x7e, 0x1d, 0x6a, 0x5c, 0x85,
0x77, 0xfe, 0x54, 0x67, 0x95, 0x34, 0xd1, 0xb7, 0xfa, 0x5c, 0x86, 0xa2, 0x76, 0xff, 0xca, 0x73,
0xc9, 0x9f, 0x2c, 0xa7, 0xa5, 0x80, 0xf6, 0x63, 0x68, 0x2c, 0xb6, 0xae, 0xa2, 0xd3, 0x28, 0xd1,
0x71, 0x04, 0xb4, 0x31, 0x15, 0x33, 0x9e, 0x60, 0xfa, 0xcb, 0x8c, 0x66, 0x02, 0x7d, 0x53, 0xf0,
0x53, 0xf8, 0xab, 0x8a, 0x2c, 0x1d, 0xb1, 0x06, 0x20, 0x17, 0xaa, 0x94, 0x73, 0xc6, 0x75, 0x7b,
0x90, 0x97, 0x2b, 0xcf, 0xe3, 0xe9, 0xd8, 0x7b, 0xa3, 0x94, 0x87, 0x73, 0x07, 0xa7, 0x03, 0xb7,
0x8a, 0xac, 0x59, 0xca, 0x92, 0x8c, 0x3a, 0x7f, 0x19, 0x70, 0x1b, 0x53, 0x25, 0xbc, 0x9d, 0x29,
0x09, 0xe9, 0x26, 0x4b, 0x82, 0x28, 0x2c, 0x48, 0x75, 0xc0, 0xc4, 0x85, 0x16, 0xb0, 0x5c, 0x22,
0x17, 0xea, 0xc3, 0x98, 0x88, 0x80, 0xf1, 0xa9, 0x4e, 0xd7, 0xf2, 0xd2, 0x91, 0x57, 0xec, 0xe1,
0xc5, 0x57, 0xd4, 0x85, 0xa6, 0x0e, 0xbc, 0xcb, 0x26, 0xd4, 0x32, 0x55, 0x8c, 0xf2, 0x16, 0xb2,
0x60, 0x6d, 0xc0, 0xc2, 0x1f, 0xc9, 0x94, 0x5a, 0x15, 0xf5, 0xb5, 0x30, 0x9d, 0xdf, 0x0d, 0xb0,
0x2f, 0x62, 0x95, 0x93, 0x46, 0xdf, 0x43, 0x6d, 0x2b, 0x0a, 0x69, 0x96, 0xd7, 0xaa, 0xd1, 0xef,
0x7d, 0x38, 0xba, 0xb3, 0xf2, 0xcf, 0xd1, 0x9d, 0x7b, 0x25, 0xe9, 0xb3, 0x94, 0x26, 0x63, 0x96,
0x08, 0x12, 0x25, 0x94, 0xcb, 0x61, 0x7c, 0x38, 0x51, 0x10, 0x2f, 0x47, 0x62, 0x1d, 0x01, 0x7d,
0x02, 0xb5, 0x3c, 0xba, 0x96, 0x8c, 0xb6, 0x9c, 0xf7, 0x26, 0xb4, 0xde, 0x48, 0x02, 0x45, 0x2d,
0x3c, 0x80, 0x2d, 0x1a, 0x44, 0x49, 0x24, 0x22, 0x96, 0xe8, 0x26, 0xdd, 0x92, 0x67, 0x3f, 0xd9,
0xc5, 0x25, 0x0f, 0x64, 0x43, 0x7d, 0x5b, 0x37, 0x4c, 0xb7, 0x7f, 0x61, 0xa3, 0x77, 0xd0, 0x2c,
0xd6, 0xaf, 0x53, 0x61, 0x99, 0x4a, 0x7e, 0x4f, 0x96, 0x74, 0xbc, 0xcc, 0xc4, 0x2b, 0x41, 0x73,
0x2d, 0x96, 0x83, 0x21, 0x17, 0xd6, 0x77, 0xa6, 0x29, 0xe3, 0x62, 0x93, 0x8c, 0xf7, 0xa8, 0x54,
0xa7, 0x55, 0xe9, 0x9a, 0x6e, 0x03, 0x9f, 0xdd, 0x46, 0x0f, 0xe0, 0x23, 0x12, 0xc7, 0xec, 0x50,
0xcb, 0x49, 0x09, 0xc3, 0xaa, 0x76, 0x0d, 0xb7, 0x8e, 0xcf, 0x7f, 0x90, 0x5a, 0xde, 0x8e, 0x12,
0x12, 0x5b, 0xa0, 0x3c, 0x72, 0x03, 0x39, 0xd0, 0x7a, 0xf9, 0xab, 0x0c, 0x4b, 0xf9, 0x0b, 0x21,
0xb8, 0xd5, 0x54, 0x45, 0x3c, 0xb5, 0x67, 0x3f, 0x87, 0xce, 0x59, 0xca, 0x37, 0x9a, 0x95, 0x9f,
0xa0, 0xad, 0xcf, 0xaf, 0xfb, 0xdf, 0x29, 0x5d, 0x51, 0xf9, 0x05, 0x75, 0x32, 0x3d, 0xe6, 0x0d,
0xa7, 0xc7, 0xf9, 0x0d, 0xd6, 0x31, 0x25, 0x93, 0xed, 0x28, 0xa6, 0x97, 0xcb, 0x5e, 0x36, 0x33,
0x8a, 0xe9, 0x90, 0x88, 0xbd, 0x45, 0x33, 0xb5, 0x8d, 0x9e, 0x42, 0x15, 0x93, 0x24, 0xa4, 0x3a,
0xf5, 0x67, 0x4b, 0x52, 0xab, 0x24, 0xd2, 0x17, 0xe7, 0x10, 0xe7, 0x19, 0x34, 0x16, 0x7b, 0x52,
0x8a, 0xaf, 0x83, 0x20, 0xa3, 0xb9, 0xac, 0x4d, 0xac, 0x2d, 0xb9, 0x3f, 0xa0, 0x49, 0xa8, 0x53,
0x9b, 0x58, 0x5b, 0xce, 0x5d, 0xe8, 0x9c, 0x30, 0xd7, 0xa5, 0x41, 0x50, 0xd9, 0x92, 0x97, 0xad,
0xa1, 0xfa, 0xa0, 0xd6, 0x4e, 0x1b, 0x9a, 0xc3, 0x28, 0x29, 0x86, 0xda, 0x39, 0x36, 0xa0, 0x35,
0x64, 0xc9, 0xc9, 0x38, 0x0d, 0x61, 0xbd, 0xe8, 0xcf, 0x8b, 0xe1, 0xce, 0x26, 0x49, 0x8b, 0x0b,
0xb1, 0x7b, 0xfe, 0x28, 0xfa, 0xf9, 0xf0, 0x72, 0xc7, 0x7e, 0x45, 0x4e, 0x1e, 0x3e, 0x0b, 0x47,
0xdf, 0xc1, 0xda, 0x60, 0xd0, 0x57, 0x91, 0x56, 0x6f, 0x14, 0xa9, 0x80, 0xa1, 0xe7, 0xb0, 0xf6,
0x56, 0xbd, 0x6a, 0x99, 0x9e, 0x8e, 0x0b, 0xca, 0xaa, 0x1e, 0x3f, 0x2f, 0x77, 0xc3, 0x74, 0xcc,
0xf8, 0x04, 0x17, 0xa0, 0xde, 0xfb, 0x0a, 0x34, 0x06, 0x83, 0x7e, 0x9f, 0x47, 0x93, 0x90, 0xa2,
0x3f, 0x0c, 0x40, 0xe7, 0xef, 0x13, 0xf4, 0xd5, 0x72, 0x95, 0x5c, 0x7c, 0x29, 0xda, 0x5f, 0xdf,
0x10, 0xa5, 0xab, 0xfc, 0x0e, 0xaa, 0x4a, 0xc5, 0xe8, 0xf3, 0x6b, 0xce, 0xb9, 0xed, 0x5e, 0xed,
0xa8, 0x63, 0x8f, 0xa1, 0x5e, 0x28, 0x01, 0xdd, 0x5b, 0x4a, 0xef, 0x94, 0xd0, 0xed, 0xfb, 0xd7,
0xf2, 0xd5, 0x49, 0xde, 0x42, 0x45, 0xca, 0x08, 0xdd, 0x5d, 0x02, 0x2a, 0xe9, 0xcc, 0x5e, 0x76,
0xce, 0x53, 0xfa, 0xfb, 0x59, 0xbe, 0xc7, 0xea, 0x8e, 0x71, 0x97, 0xf2, 0x29, 0x3d, 0x97, 0xf6,
0x17, 0xd7, 0xf0, 0xcc, 0xc3, 0xf7, 0x5b, 0x1f, 0x8e, 0x37, 0x8c, 0xbf, 0x8f, 0x37, 0x8c, 0x7f,
0x8f, 0x37, 0x8c, 0x51, 0x4d, 0xfd, 0xe1, 0x79, 0xf4, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0f,
0xfd, 0x24, 0x08, 0x13, 0x0a, 0x00, 0x00,
}

View file

@ -46,6 +46,8 @@ message ReturnResponse {
message ResolveImageConfigRequest {
string Ref = 1;
pb.Platform Platform = 2;
string ResolveMode = 3;
string LogName = 4;
}
message ResolveImageConfigResponse {

View file

@ -12,13 +12,8 @@ import (
"google.golang.org/grpc"
)
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
ExcludePatterns: excludes,
IncludePatterns: includes,
FollowPaths: followPaths,
Map: _map,
}, progress)
func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error {
return fsutil.Send(stream.Context(), stream, fs, progress)
}
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {

View file

@ -11,7 +11,9 @@ import (
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
@ -79,7 +81,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
dir, ok := sp.dirs[dirName]
if !ok {
return errors.Errorf("no access allowed to dir %q", dirName)
return status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)
}
excludes := opts[keyExcludePatterns]
@ -101,7 +103,12 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
doneCh = sp.doneCh
sp.doneCh = nil
}
err := pr.sendFn(stream, dir.Dir, includes, excludes, followPaths, progress, dir.Map)
err := pr.sendFn(stream, fsutil.NewFS(dir.Dir, &fsutil.WalkOpt{
ExcludePatterns: excludes,
IncludePatterns: includes,
FollowPaths: followPaths,
Map: dir.Map,
}), progress)
if doneCh != nil {
if err != nil {
doneCh <- err
@ -120,7 +127,7 @@ type progressCb func(int, bool)
type protocol struct {
name string
sendFn func(stream grpc.Stream, srcDir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error
sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
}
@ -169,7 +176,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
}
}
if pr == nil {
return errors.New("no fssync handlers")
return errors.New("no local sources enabled")
}
opts := make(map[string][]string)
@ -256,7 +263,7 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
return writeTargetFile(stream, sp.outfile)
}
func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error {
func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress func(int, bool)) error {
method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
if !c.Supports(method) {
return errors.Errorf("method %s not supported by the client", method)
@ -269,7 +276,7 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres
return err
}
return sendDiffCopy(cc, srcPath, nil, nil, nil, progress, nil)
return sendDiffCopy(cc, fs, progress)
}
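With this change the sync source is constructed once as an fsutil.FS and handed to Send. A compile-only sketch of building one, assuming the vendored fsutil at this revision; the directory and exclude patterns are illustrative:

package filesyncexample

import (
	"github.com/tonistiigi/fsutil"
)

// buildContextFS wraps a local directory with walk-time filters so it can be
// passed to sendDiffCopy / CopyToCaller above.
func buildContextFS(dir string) fsutil.FS {
	return fsutil.NewFS(dir, &fsutil.WalkOpt{
		ExcludePatterns: []string{"node_modules", ".git"},
	})
}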
func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {

View file

@ -0,0 +1,3 @@
package secrets
//go:generate protoc --gogoslick_out=plugins=grpc:. secrets.proto

View file

@ -0,0 +1,30 @@
package secrets
import (
"context"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type SecretStore interface {
GetSecret(context.Context, string) ([]byte, error)
}
var ErrNotFound = errors.Errorf("not found")
func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error) {
client := NewSecretsClient(c.Conn())
resp, err := client.GetSecret(ctx, &GetSecretRequest{
ID: id,
})
if err != nil {
if st, ok := status.FromError(err); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) {
return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id)
}
return nil, err
}
return resp.Data, nil
}
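Callers are expected to treat a missing secret differently from transport failures; a compile-only sketch, assuming this package lives under session/secrets like the other session services (the helper name loadSecret is illustrative):

package secretsexample

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/secrets"
	"github.com/pkg/errors"
)

// loadSecret returns (data, found, err); both codes.NotFound and
// codes.Unimplemented surface as ErrNotFound from GetSecret above.
func loadSecret(ctx context.Context, caller session.Caller, id string) ([]byte, bool, error) {
	dt, err := secrets.GetSecret(ctx, caller, id)
	if err != nil {
		if errors.Cause(err) == secrets.ErrNotFound {
			return nil, false, nil
		}
		return nil, false, err
	}
	return dt, true, nil
}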

View file

@ -0,0 +1,813 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: secrets.proto
/*
Package secrets is a generated protocol buffer package.
It is generated from these files:
secrets.proto
It has these top-level messages:
GetSecretRequest
GetSecretResponse
*/
package secrets
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import bytes "bytes"
import strings "strings"
import reflect "reflect"
import sortkeys "github.com/gogo/protobuf/sortkeys"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type GetSecretRequest struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} }
func (*GetSecretRequest) ProtoMessage() {}
func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{0} }
func (m *GetSecretRequest) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *GetSecretRequest) GetAnnotations() map[string]string {
if m != nil {
return m.Annotations
}
return nil
}
type GetSecretResponse struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} }
func (*GetSecretResponse) ProtoMessage() {}
func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{1} }
func (m *GetSecretResponse) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func init() {
proto.RegisterType((*GetSecretRequest)(nil), "moby.buildkit.secrets.v1.GetSecretRequest")
proto.RegisterType((*GetSecretResponse)(nil), "moby.buildkit.secrets.v1.GetSecretResponse")
}
func (this *GetSecretRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*GetSecretRequest)
if !ok {
that2, ok := that.(GetSecretRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.ID != that1.ID {
return false
}
if len(this.Annotations) != len(that1.Annotations) {
return false
}
for i := range this.Annotations {
if this.Annotations[i] != that1.Annotations[i] {
return false
}
}
return true
}
func (this *GetSecretResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*GetSecretResponse)
if !ok {
that2, ok := that.(GetSecretResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !bytes.Equal(this.Data, that1.Data) {
return false
}
return true
}
func (this *GetSecretRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&secrets.GetSecretRequest{")
s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
keysForAnnotations := make([]string, 0, len(this.Annotations))
for k, _ := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k])
}
mapStringForAnnotations += "}"
if this.Annotations != nil {
s = append(s, "Annotations: "+mapStringForAnnotations+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *GetSecretResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&secrets.GetSecretResponse{")
s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringSecrets(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Secrets service
type SecretsClient interface {
GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error)
}
type secretsClient struct {
cc *grpc.ClientConn
}
func NewSecretsClient(cc *grpc.ClientConn) SecretsClient {
return &secretsClient{cc}
}
func (c *secretsClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) {
out := new(GetSecretResponse)
err := grpc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Secrets service
type SecretsServer interface {
GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error)
}
func RegisterSecretsServer(s *grpc.Server, srv SecretsServer) {
s.RegisterService(&_Secrets_serviceDesc, srv)
}
func _Secrets_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSecretRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecretsServer).GetSecret(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/moby.buildkit.secrets.v1.Secrets/GetSecret",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecretsServer).GetSecret(ctx, req.(*GetSecretRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Secrets_serviceDesc = grpc.ServiceDesc{
ServiceName: "moby.buildkit.secrets.v1.Secrets",
HandlerType: (*SecretsServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetSecret",
Handler: _Secrets_GetSecret_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "secrets.proto",
}
func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSecrets(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if len(m.Annotations) > 0 {
for k, _ := range m.Annotations {
dAtA[i] = 0x12
i++
v := m.Annotations[k]
mapSize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v)))
i = encodeVarintSecrets(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintSecrets(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintSecrets(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Data) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSecrets(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
return i, nil
}
func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *GetSecretRequest) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovSecrets(uint64(l))
}
if len(m.Annotations) > 0 {
for k, v := range m.Annotations {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v)))
n += mapEntrySize + 1 + sovSecrets(uint64(mapEntrySize))
}
}
return n
}
func (m *GetSecretResponse) Size() (n int) {
var l int
_ = l
l = len(m.Data)
if l > 0 {
n += 1 + l + sovSecrets(uint64(l))
}
return n
}
func sovSecrets(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozSecrets(x uint64) (n int) {
return sovSecrets(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *GetSecretRequest) String() string {
if this == nil {
return "nil"
}
keysForAnnotations := make([]string, 0, len(this.Annotations))
for k, _ := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
}
mapStringForAnnotations += "}"
s := strings.Join([]string{`&GetSecretRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Annotations:` + mapStringForAnnotations + `,`,
`}`,
}, "")
return s
}
func (this *GetSecretResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&GetSecretResponse{`,
`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
`}`,
}, "")
return s
}
func valueToStringSecrets(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *GetSecretRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSecrets
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthSecrets
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Annotations == nil {
m.Annotations = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthSecrets
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthSecrets
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipSecrets(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSecrets
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Annotations[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSecrets(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSecrets
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GetSecretResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSecrets
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthSecrets
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSecrets(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSecrets
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipSecrets(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSecrets
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSecrets
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSecrets
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSecrets
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSecrets
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipSecrets(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthSecrets = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSecrets = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("secrets.proto", fileDescriptorSecrets) }
var fileDescriptorSecrets = []byte{
// 279 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4e, 0x4d, 0x2e,
0x4a, 0x2d, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4,
0x4b, 0x2a, 0xcd, 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x83, 0x49, 0x96, 0x19, 0x2a, 0x1d, 0x64,
0xe4, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0x06, 0x8b, 0x04, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08,
0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x79, 0xba, 0x08,
0xc5, 0x72, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x30,
0x29, 0x30, 0x6b, 0x70, 0x1b, 0x59, 0xeb, 0xe1, 0x32, 0x54, 0x0f, 0xdd, 0x40, 0x3d, 0x47, 0x84,
0x6e, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0xf3, 0xa4, 0xec, 0xb8, 0x04, 0xd0, 0x15, 0x08,
0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0xdd, 0x00, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25,
0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x25, 0x75, 0x2e,
0x41, 0x24, 0x1b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b,
0x12, 0xc1, 0x26, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xf9, 0x5c, 0xec, 0x10, 0x55, 0xc5, 0x42, 0x29,
0x5c, 0x9c, 0x70, 0x3d, 0x42, 0x5a, 0xc4, 0x7b, 0x45, 0x4a, 0x9b, 0x28, 0xb5, 0x10, 0x47, 0x38,
0x99, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f,
0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07,
0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86,
0x28, 0x76, 0xa8, 0x59, 0x49, 0x6c, 0xe0, 0x58, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x05,
0x4e, 0x56, 0xde, 0xc6, 0x01, 0x00, 0x00,
}
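As a hedged illustration of the generated API above (not part of this change), a minimal in-memory SecretsServer could be wired up as in the sketch below. The mapSecrets type, the listener address, and the import path of the generated package are assumptions made for the example only.

package main

import (
	"context"
	"fmt"
	"net"

	secrets "github.com/moby/buildkit/session/secrets" // assumed import path of the generated package
	"google.golang.org/grpc"
)

// mapSecrets is a hypothetical in-memory SecretsServer used only for illustration.
type mapSecrets map[string][]byte

func (m mapSecrets) GetSecret(ctx context.Context, req *secrets.GetSecretRequest) (*secrets.GetSecretResponse, error) {
	dt, ok := m[req.ID]
	if !ok {
		return nil, fmt.Errorf("secret %s not found", req.ID)
	}
	return &secrets.GetSecretResponse{Data: dt}, nil
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0") // hypothetical listener for the sketch
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	secrets.RegisterSecretsServer(srv, mapSecrets{"mysecret": []byte("hunter2")})
	_ = srv.Serve(l)
}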

View file

@ -0,0 +1,19 @@
syntax = "proto3";
package moby.buildkit.secrets.v1;
option go_package = "secrets";
service Secrets{
rpc GetSecret(GetSecretRequest) returns (GetSecretResponse);
}
message GetSecretRequest {
string ID = 1;
map<string, string> annotations = 2;
}
message GetSecretResponse {
bytes data = 1;
}
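On the caller side, the generated SecretsClient can be used over any gRPC connection. The sketch below is illustrative only: the address is hypothetical, the import path of the generated package is assumed, and inside BuildKit the connection is normally obtained from a session caller (as the exec-op code later in this diff does via secrets.GetSecret) rather than a direct dial.

package main

import (
	"context"
	"log"

	secrets "github.com/moby/buildkit/session/secrets" // assumed import path of the generated package
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical endpoint for the sketch only.
	conn, err := grpc.Dial("127.0.0.1:1234", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := secrets.NewSecretsClient(conn)
	resp, err := c.GetSecret(context.Background(), &secrets.GetSecretRequest{ID: "mysecret"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d bytes of secret data", len(resp.Data))
}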

View file

@ -133,6 +133,11 @@ func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) {
func (s *Store) AddResult(id string, res solver.CacheResult) error {
return s.db.Update(func(tx *bolt.Tx) error {
_, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
}
b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
@ -218,7 +223,7 @@ func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error {
return nil
}
if err := ids.Delete([]byte(resultID)); err != nil {
if err := ids.Delete([]byte(id)); err != nil {
return err
}
@ -283,6 +288,11 @@ func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error {
return err
}
}
// intentionally ignoring errors
tx.Bucket([]byte(linksBucket)).DeleteBucket([]byte(id))
tx.Bucket([]byte(resultBucket)).DeleteBucket([]byte(id))
return nil
}

View file

@ -7,6 +7,7 @@ import (
"github.com/moby/buildkit/identity"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
type CacheID string
@ -22,14 +23,9 @@ func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStor
results: results,
}
storage.Walk(func(id string) error {
return storage.WalkResults(id, func(cr CacheResult) error {
if !results.Exists(cr.ID) {
storage.Release(cr.ID)
}
return nil
})
})
if err := cm.ReleaseUnreferenced(); err != nil {
logrus.Errorf("failed to release unreferenced cache metadata: %+v", err)
}
return cm
}
@ -42,6 +38,17 @@ type cacheManager struct {
results CacheResultStorage
}
func (c *cacheManager) ReleaseUnreferenced() error {
return c.backend.Walk(func(id string) error {
return c.backend.WalkResults(id, func(cr CacheResult) error {
if !c.results.Exists(cr.ID) {
c.backend.Release(cr.ID)
}
return nil
})
})
}
func (c *cacheManager) ID() string {
return c.id
}

View file

@ -144,7 +144,9 @@ func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) {
inputs := make([][]CacheKeyWithSelector, len(e.deps))
results := make([]CachedResult, len(e.deps))
for i, dep := range e.deps {
inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: dep.result.CacheKey(), Selector: e.cacheMap.Deps[i].Selector})
for _, k := range dep.result.CacheKeys() {
inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: k, Selector: e.cacheMap.Deps[i].Selector})
}
if dep.slowCacheKey != nil {
inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey})
}
@ -245,7 +247,9 @@ func (e *edge) currentIndexKey() *CacheKey {
keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k})
}
if d.result != nil {
keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: d.result.CacheKey()})
for _, rk := range d.result.CacheKeys() {
keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: rk})
}
if d.slowCacheKey != nil {
keys[i] = append(keys[i], CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: d.slowCacheKey.CacheKey, Exporter: &exporter{k: d.slowCacheKey.CacheKey}}})
}
@ -413,7 +417,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
}
// response for requests to dependencies
if dep, ok := e.depRequests[upt]; ok { // TODO: ignore canceled
if dep, ok := e.depRequests[upt]; ok {
if err := upt.Status().Err; !upt.Status().Canceled && upt.Status().Completed && err != nil {
if e.err == nil {
e.err = err
@ -427,6 +431,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
newKeys := state.keys[len(dep.keys):]
if e.cacheMap != nil {
e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector))
dep.edgeState.keys = state.keys
if e.allDepsHaveKeys() {
e.keysDidChange = true
}
@ -461,11 +466,14 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
k := NewCacheKey(upt.Status().Value.(digest.Digest), -1)
dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}}
slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}
defKeyExp := CacheKeyWithSelector{CacheKey: dep.result.CacheKey(), Selector: e.cacheMap.Deps[i].Selector}
defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys()))
for _, dk := range dep.result.CacheKeys() {
defKeys = append(defKeys, CacheKeyWithSelector{CacheKey: dk, Selector: e.cacheMap.Deps[i].Selector})
}
dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp})
// connect def key to slow key
e.op.Cache().Query([]CacheKeyWithSelector{defKeyExp, slowKeyExp}, dep.index, e.cacheMap.Digest, e.edge.Index)
e.op.Cache().Query(append(defKeys, slowKeyExp), dep.index, e.cacheMap.Digest, e.edge.Index)
dep.slowCacheComplete = true
e.keysDidChange = true
@ -510,7 +518,9 @@ func (e *edge) recalcCurrentState() {
mergedKey.deps = make([][]CacheKeyWithSelector, len(e.deps))
for i, dep := range e.deps {
if dep.result != nil {
mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: dep.result.CacheKey()})
for _, dk := range dep.result.CacheKeys() {
mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: dk})
}
if dep.slowCacheKey != nil {
mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey})
}
@ -789,7 +799,7 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
return nil, err
}
return NewCachedResult(res, ExportableCacheKey{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}), nil
return NewCachedResult(res, []ExportableCacheKey{{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}}), nil
}
// execOp creates a request to execute the vertex operation
@ -834,12 +844,15 @@ func (e *edge) execOp(ctx context.Context) (interface{}, error) {
exporters = append(exporters, exps...)
}
ck := &ExportableCacheKey{
CacheKey: cacheKeys[0],
Exporter: &mergedExporter{exporters: exporters},
ek := make([]ExportableCacheKey, 0, len(cacheKeys))
for _, ck := range cacheKeys {
ek = append(ek, ExportableCacheKey{
CacheKey: ck,
Exporter: &mergedExporter{exporters: exporters},
})
}
return NewCachedResult(res, *ck), nil
return NewCachedResult(res, ek), nil
}
func toResultSlice(cres []CachedResult) (out []Result) {

View file

@ -7,7 +7,6 @@ import (
"time"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/progress"
@ -21,7 +20,7 @@ type ResolveOpFunc func(Vertex, Builder) (Op, error)
type Builder interface {
Build(ctx context.Context, e Edge) (CachedResult, error)
Call(ctx context.Context, name string, fn func(ctx context.Context) error) error
Context(ctx context.Context) context.Context
}
// Solver provides a shared graph of all the vertexes currently being
@ -161,14 +160,13 @@ func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) {
return nil, err
}
sb.mu.Lock()
sb.exporters = append(sb.exporters, res.CacheKey())
sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain
sb.mu.Unlock()
return res, nil
}
func (sb *subBuilder) Call(ctx context.Context, name string, fn func(ctx context.Context) error) error {
ctx = progress.WithProgress(ctx, sb.mpw)
return inVertexContext(ctx, name, fn)
func (sb *subBuilder) Context(ctx context.Context) context.Context {
return progress.WithProgress(ctx, sb.mpw)
}
type Job struct {
@ -444,9 +442,8 @@ func (j *Job) Discard() error {
return nil
}
func (j *Job) Call(ctx context.Context, name string, fn func(ctx context.Context) error) error {
ctx = progress.WithProgress(ctx, j.pw)
return inVertexContext(ctx, name, fn)
func (j *Job) Context(ctx context.Context) context.Context {
return progress.WithProgress(ctx, j.pw)
}
type cacheMapResp struct {
@ -557,6 +554,9 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBased
return key, err
})
if err != nil {
ctx = progress.WithProgress(ctx, s.st.mpw)
notifyStarted(ctx, &s.st.clientVertex, false)
notifyCompleted(ctx, &s.st.clientVertex, err, false)
return "", err
}
return key.(digest.Digest), nil
@ -759,16 +759,3 @@ func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bo
}
pw.Write(v.Digest.String(), *v)
}
func inVertexContext(ctx context.Context, name string, f func(ctx context.Context) error) error {
v := client.Vertex{
Digest: digest.FromBytes([]byte(identity.NewID())),
Name: name,
}
pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest))
notifyStarted(ctx, &v, false)
defer pw.Close()
err := f(ctx)
notifyCompleted(ctx, &v, err, false)
return err
}

View file

@ -2,6 +2,7 @@ package llbsolver
import (
"context"
"fmt"
"io"
"strings"
"sync"
@ -11,6 +12,7 @@ import (
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/frontend"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/worker"
@ -47,7 +49,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
func(ref string) {
cm = newLazyCacheManager(ref, func() (solver.CacheManager, error) {
var cmNew solver.CacheManager
if err := b.builder.Call(ctx, "importing cache manifest from "+ref, func(ctx context.Context) error {
if err := inVertexContext(b.builder.Context(ctx), "importing cache manifest from "+ref, func(ctx context.Context) error {
if b.resolveCacheImporter == nil {
return errors.New("no cache importer is available")
}
@ -73,7 +75,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
}
if req.Definition != nil && req.Definition.Def != nil {
edge, err := Load(req.Definition, WithCacheSources(cms), RuntimePlatforms(b.platforms))
edge, err := Load(req.Definition, WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
if err != nil {
return nil, err
}
@ -127,12 +129,19 @@ func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.Imm
return err
}
func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error) {
func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {
w, err := s.resolveWorker()
if err != nil {
return "", nil, err
}
return w.ResolveImageConfig(ctx, ref, platform)
if opt.LogName == "" {
opt.LogName = fmt.Sprintf("resolve image config for %s", ref)
}
err = inVertexContext(s.builder.Context(ctx), opt.LogName, func(ctx context.Context) error {
dgst, config, err = w.ResolveImageConfig(ctx, ref, opt)
return err
})
return dgst, config, err
}
type lazyCacheManager struct {

View file

@ -5,8 +5,10 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
@ -18,7 +20,11 @@ import (
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
@ -26,6 +32,7 @@ import (
"github.com/moby/buildkit/util/progress/logs"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -35,6 +42,7 @@ const execCacheType = "buildkit.exec.v0"
type execOp struct {
op *pb.ExecOp
cm cache.Manager
sm *session.Manager
md *metadata.Store
exec executor.Executor
w worker.Worker
@ -43,10 +51,11 @@ type execOp struct {
cacheMounts map[string]*cacheRefShare
}
func NewExecOp(v solver.Vertex, op *pb.Op_Exec, cm cache.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
func NewExecOp(v solver.Vertex, op *pb.Op_Exec, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
return &execOp{
op: op.Exec,
cm: cm,
sm: sm,
md: md,
exec: exec,
numInputs: len(v.Inputs()),
@ -206,7 +215,7 @@ func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id
func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, m *pb.Mount, block bool) (cache.MutableRef, error) {
makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) {
desc := fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
return e.cm.New(ctx, ref, cache.WithDescription(desc), cache.CachePolicyRetain)
return e.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(desc), cache.CachePolicyRetain)
}
cacheRefsLocker.Lock(key)
@ -259,6 +268,112 @@ func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cach
return mRef, nil
}
func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
if m.SecretOpt == nil {
return nil, errors.Errorf("invalid sercet mount options")
}
sopt := *m.SecretOpt
id := sopt.ID
if id == "" {
return nil, errors.Errorf("secret ID missing from mount options")
}
sessionID := session.FromContext(ctx)
if sessionID == "" {
return nil, errors.New("could not access local files without session")
}
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
caller, err := e.sm.Get(timeoutCtx, sessionID)
if err != nil {
return nil, err
}
dt, err := secrets.GetSecret(ctx, caller, id)
if err != nil {
if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional {
return nil, nil
}
return nil, err
}
return &secretMount{mount: m, data: dt}, nil
}
type secretMount struct {
mount *pb.Mount
data []byte
}
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
return &secretMountInstance{sm: sm}, nil
}
type secretMountInstance struct {
sm *secretMount
root string
}
func (sm *secretMountInstance) Mount() ([]mount.Mount, error) {
dir, err := ioutil.TempDir("", "buildkit-secrets")
if err != nil {
return nil, errors.Wrap(err, "failed to create temp dir")
}
if err := os.Chmod(dir, 0711); err != nil {
return nil, err
}
tmpMount := mount.Mount{
Type: "tmpfs",
Source: "tmpfs",
Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
}
if system.RunningInUserNS() {
tmpMount.Options = nil
}
if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
return nil, errors.Wrap(err, "unable to setup secret mount")
}
sm.root = dir
randID := identity.NewID()
fp := filepath.Join(dir, randID)
if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
sm.Release()
return nil, err
}
if err := os.Chown(fp, int(sm.sm.mount.SecretOpt.Uid), int(sm.sm.mount.SecretOpt.Gid)); err != nil {
return nil, err
}
if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode)); err != nil {
return nil, err
}
return []mount.Mount{{
Type: "bind",
Source: fp,
Options: []string{"ro", "rbind"},
}}, nil
}
func (sm *secretMountInstance) Release() error {
if sm.root != "" {
if err := mount.Unmount(sm.root, 0); err != nil {
return err
}
return os.RemoveAll(sm.root)
}
return nil
}
func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
var mounts []executor.Mount
var root cache.Mountable
@ -347,6 +462,15 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
case pb.MountType_TMPFS:
mountable = newTmpfs()
case pb.MountType_SECRET:
secretMount, err := e.getSecretMountable(ctx, m)
if err != nil {
return nil, err
}
if secretMount == nil {
continue
}
mountable = secretMount
default:
return nil, errors.Errorf("mount type %s not implemented", m.MountType)
}
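The MountType_SECRET case above is driven by the SecretOpt message this diff adds to the LLB ops (see ops.pb.go further below). As a rough sketch with made-up values (the destination path and secret ID are hypothetical), a secret mount in the marshaled LLB carries data along these lines:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	// Illustrative values only; the destination path and secret ID are made up.
	m := &pb.Mount{
		Dest:      "/run/secrets/mysecret",
		MountType: pb.MountType_SECRET,
		SecretOpt: &pb.SecretOpt{
			ID:       "mysecret", // resolved via the session's Secrets service
			Uid:      0,
			Gid:      0,
			Mode:     0400,
			Optional: false, // getSecretMountable returns an error if the secret is missing
		},
	}
	fmt.Println(m.MountType, m.SecretOpt.ID)
}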

View file

@ -4,10 +4,7 @@ import (
"bytes"
"context"
"path"
"strings"
"time"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/contenthash"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/worker"
@ -53,95 +50,6 @@ func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc {
}
}
func newCacheResultStorage(wc *worker.Controller) solver.CacheResultStorage {
return &cacheResultStorage{
wc: wc,
}
}
type cacheResultStorage struct {
wc *worker.Controller
}
func (s *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) {
ref, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys())
}
if ref.ImmutableRef != nil {
if !cache.HasCachePolicyRetain(ref.ImmutableRef) {
if err := cache.CachePolicyRetain(ref.ImmutableRef); err != nil {
return solver.CacheResult{}, err
}
ref.ImmutableRef.Metadata().Commit()
}
}
return solver.CacheResult{ID: ref.ID(), CreatedAt: time.Now()}, nil
}
func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
return s.load(res.ID)
}
func (s *cacheResultStorage) getWorkerRef(id string) (worker.Worker, string, error) {
workerID, refID, err := parseWorkerRef(id)
if err != nil {
return nil, "", err
}
w, err := s.wc.Get(workerID)
if err != nil {
return nil, "", err
}
return w, refID, nil
}
func (s *cacheResultStorage) load(id string) (solver.Result, error) {
w, refID, err := s.getWorkerRef(id)
if err != nil {
return nil, err
}
if refID == "" {
return worker.NewWorkerRefResult(nil, w), nil
}
ref, err := w.LoadRef(refID)
if err != nil {
return nil, err
}
return worker.NewWorkerRefResult(ref, w), nil
}
func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
w, refID, err := s.getWorkerRef(res.ID)
if err != nil {
return nil, err
}
ref, err := w.LoadRef(refID)
if err != nil {
return nil, err
}
defer ref.Release(context.TODO())
remote, err := w.GetRemote(ctx, ref, false)
if err != nil {
return nil, nil // ignore error. loadRemote is best effort
}
return remote, nil
}
func (s *cacheResultStorage) Exists(id string) bool {
ref, err := s.load(id)
if err != nil {
return false
}
ref.Release(context.TODO())
return true
}
func parseWorkerRef(id string) (string, string, error) {
parts := strings.Split(id, "::")
if len(parts) != 2 {
return "", "", errors.Errorf("invalid workerref id: %s", id)
}
return parts[0], parts[1], nil
}
func workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) {
ref, ok := res.Sys().(*worker.WorkerRef)
if !ok {

View file

@ -9,10 +9,12 @@ import (
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@ -34,17 +36,13 @@ type Solver struct {
platforms []specs.Platform
}
func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, resolveCI remotecache.ResolveCacheImporterFunc) (*Solver, error) {
func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI remotecache.ResolveCacheImporterFunc) (*Solver, error) {
s := &Solver{
resolveWorker: defaultResolver(wc),
frontends: f,
resolveCacheImporter: resolveCI,
}
results := newCacheResultStorage(wc)
cache := solver.NewCacheManager("local", cacheStore, results)
// executing is currently only allowed on default worker
w, err := wc.GetDefault()
if err != nil {
@ -104,17 +102,34 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
var exporterResponse map[string]string
if exp := exp.Exporter; exp != nil {
var immutable cache.ImmutableRef
if res := res.Ref; res != nil { // FIXME(tonistiigi):
inp := exporter.Source{
Metadata: res.Metadata,
}
if res := res.Ref; res != nil {
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid reference: %T", res.Sys())
}
immutable = workerRef.ImmutableRef
inp.Ref = workerRef.ImmutableRef
}
if res.Refs != nil {
m := make(map[string]cache.ImmutableRef, len(res.Refs))
for k, res := range res.Refs {
if res == nil {
m[k] = nil
} else {
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid reference: %T", res.Sys())
}
m[k] = workerRef.ImmutableRef
}
}
inp.Refs = m
}
if err := j.Call(ctx, exp.Name(), func(ctx context.Context) error {
exporterResponse, err = exp.Export(ctx, immutable, res.Metadata)
if err := inVertexContext(j.Context(ctx), exp.Name(), func(ctx context.Context) error {
exporterResponse, err = exp.Export(ctx, inp)
return err
}); err != nil {
return nil, err
@ -122,10 +137,11 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
}
if e := exp.CacheExporter; e != nil {
if err := j.Call(ctx, "exporting cache", func(ctx context.Context) error {
if err := inVertexContext(j.Context(ctx), "exporting cache", func(ctx context.Context) error {
prepareDone := oneOffProgress(ctx, "preparing build cache for export")
if err := res.EachRef(func(res solver.CachedResult) error {
_, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
// all keys have same export chain so exporting others is not needed
_, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
Convert: workerRefConverter,
Mode: exp.CacheExportMode,
})
@ -175,3 +191,41 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
return err
}
}
func inVertexContext(ctx context.Context, name string, f func(ctx context.Context) error) error {
v := client.Vertex{
Digest: digest.FromBytes([]byte(identity.NewID())),
Name: name,
}
pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest))
notifyStarted(ctx, &v, false)
defer pw.Close()
err := f(ctx)
notifyCompleted(ctx, &v, err, false)
return err
}
func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) {
pw, _, _ := progress.FromContext(ctx)
defer pw.Close()
now := time.Now()
v.Started = &now
v.Completed = nil
v.Cached = cached
pw.Write(v.Digest.String(), *v)
}
func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) {
pw, _, _ := progress.FromContext(ctx)
defer pw.Close()
now := time.Now()
if v.Started == nil {
v.Started = &now
}
v.Completed = &now
v.Cached = cached
if err != nil {
v.Error = err.Error()
}
pw.Write(v.Digest.String(), *v)
}

View file

@ -37,11 +37,28 @@ func (v *vertex) Inputs() []solver.Edge {
}
func (v *vertex) Name() string {
if name, ok := v.options.Description["llb.customname"]; ok {
return name
}
return v.name
}
type LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error
func WithValidateCaps() LoadOpt {
cs := pb.Caps.CapSet(pb.Caps.All())
return func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {
if md != nil {
for c := range md.Caps {
if err := cs.Supports(c); err != nil {
return err
}
}
}
return nil
}
}
func WithCacheSources(cms []solver.CacheManager) LoadOpt {
return func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
opt.CacheSources = cms

View file

@ -15,3 +15,9 @@ const AttrHTTPFilename = "http.filename"
const AttrHTTPPerm = "http.perm"
const AttrHTTPUID = "http.uid"
const AttrHTTPGID = "http.gid"
const AttrImageResolveMode = "image.resolvemode"
const AttrImageResolveModeDefault = "default"
const AttrImageResolveModeForcePull = "pull"
const AttrImageResolveModePreferLocal = "local"
const AttrImageRecordType = "image.recordtype"

231 vendor/github.com/moby/buildkit/solver/pb/caps.go generated vendored Normal file
View file

@ -0,0 +1,231 @@
package pb
import "github.com/moby/buildkit/util/apicaps"
var Caps apicaps.CapList
// Every backwards- or forwards-incompatible change must add a new capability row.
// By default, new capabilities should be experimental. Once merged, a capability is
// considered immutable. Once a capability is marked stable, it should not be disabled.
const (
CapSourceImage apicaps.CapID = "source.image"
CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode"
CapSourceLocal apicaps.CapID = "source.local"
CapSourceLocalUnique apicaps.CapID = "source.local.unique"
CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid"
CapSourceLocalIncludePatterns apicaps.CapID = "source.local.includepatterns"
CapSourceLocalFollowPaths apicaps.CapID = "source.local.followpaths"
CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns"
CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint"
CapSourceGit apicaps.CapID = "source.git"
CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir"
CapSourceGitFullURL apicaps.CapID = "source.git.fullurl"
CapSourceHTTP apicaps.CapID = "source.http"
CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum"
CapSourceHTTPPerm apicaps.CapID = "source.http.perm"
CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid"
CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename"
CapExecMetaBase apicaps.CapID = "exec.meta.base"
CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv"
CapExecMountBind apicaps.CapID = "exec.mount.bind"
CapExecMountCache apicaps.CapID = "exec.mount.cache"
CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing"
CapExecMountSelector apicaps.CapID = "exec.mount.selector"
CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs"
CapMountSecret apicaps.CapID = "exec.mount.secret"
CapConstraints apicaps.CapID = "constraints"
CapPlatform apicaps.CapID = "platform"
CapMetaIgnoreCache apicaps.CapID = "meta.ignorecache"
CapMetaDescription apicaps.CapID = "meta.description"
CapMetaExportCache apicaps.CapID = "meta.exportcache"
)
func init() {
Caps.Init(apicaps.Cap{
ID: CapSourceImage,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceImageResolveMode,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocal,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalUnique,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalSessionID,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalIncludePatterns,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalFollowPaths,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalExcludePatterns,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceLocalSharedKeyHint,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceGit,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceGitKeepDir,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceGitFullURL,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceHTTP,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceHTTPChecksum,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceHTTPPerm,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceHTTPUIDGID,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapBuildOpLLBFileName,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMetaBase,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMetaProxy,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountBind,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountCache,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountCacheSharing,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountSelector,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountTmpfs,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMountSecret,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapConstraints,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapPlatform,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMetaIgnoreCache,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMetaDescription,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMetaExportCache,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}
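For reference, a consumer can check whether a given capability is enabled the same way WithValidateCaps does in the solver (shown earlier in this diff). A minimal sketch, using the vendored import path of this package:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	// Build the capability set of everything this build of the daemon supports,
	// then verify one specific capability, mirroring the per-vertex check that
	// WithValidateCaps performs when a definition is loaded.
	cs := pb.Caps.CapSet(pb.Caps.All())
	if err := cs.Supports(pb.CapMountSecret); err != nil {
		fmt.Println("secret mounts not supported:", err)
		return
	}
	fmt.Println("secret mounts supported")
}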

View file

@ -18,6 +18,7 @@
Meta
Mount
CacheOpt
SecretOpt
CopyOp
CopySource
SourceOp
@ -37,6 +38,7 @@ import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import github_com_moby_buildkit_util_apicaps "github.com/moby/buildkit/util/apicaps"
import io "io"
@ -468,6 +470,7 @@ type Mount struct {
Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"`
MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"`
CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt" json:"cacheOpt,omitempty"`
SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt" json:"secretOpt,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
@ -510,6 +513,13 @@ func (m *Mount) GetCacheOpt() *CacheOpt {
return nil
}
func (m *Mount) GetSecretOpt() *SecretOpt {
if m != nil {
return m.SecretOpt
}
return nil
}
// CacheOpt defines options specific to cache mounts
type CacheOpt struct {
// ID is an optional namespace for the mount
@ -537,6 +547,61 @@ func (m *CacheOpt) GetSharing() CacheSharingOpt {
return CacheSharingOpt_SHARED
}
// SecretOpt defines options describing secret mounts
type SecretOpt struct {
// ID of secret. Used for querying the value.
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
// UID of secret file
Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"`
// GID of secret file
Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"`
// Mode is the filesystem mode of secret file
Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"`
// Optional defines whether the secret value is required. An error is produced
// if the value is not found and Optional is false.
Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
}
func (m *SecretOpt) Reset() { *m = SecretOpt{} }
func (m *SecretOpt) String() string { return proto.CompactTextString(m) }
func (*SecretOpt) ProtoMessage() {}
func (*SecretOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{7} }
func (m *SecretOpt) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *SecretOpt) GetUid() uint32 {
if m != nil {
return m.Uid
}
return 0
}
func (m *SecretOpt) GetGid() uint32 {
if m != nil {
return m.Gid
}
return 0
}
func (m *SecretOpt) GetMode() uint32 {
if m != nil {
return m.Mode
}
return 0
}
func (m *SecretOpt) GetOptional() bool {
if m != nil {
return m.Optional
}
return false
}
// CopyOp copies files across Ops.
type CopyOp struct {
Src []*CopySource `protobuf:"bytes,1,rep,name=src" json:"src,omitempty"`
@ -546,7 +611,7 @@ type CopyOp struct {
func (m *CopyOp) Reset() { *m = CopyOp{} }
func (m *CopyOp) String() string { return proto.CompactTextString(m) }
func (*CopyOp) ProtoMessage() {}
func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{7} }
func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} }
func (m *CopyOp) GetSrc() []*CopySource {
if m != nil {
@ -571,7 +636,7 @@ type CopySource struct {
func (m *CopySource) Reset() { *m = CopySource{} }
func (m *CopySource) String() string { return proto.CompactTextString(m) }
func (*CopySource) ProtoMessage() {}
func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} }
func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} }
func (m *CopySource) GetSelector() string {
if m != nil {
@ -592,7 +657,7 @@ type SourceOp struct {
func (m *SourceOp) Reset() { *m = SourceOp{} }
func (m *SourceOp) String() string { return proto.CompactTextString(m) }
func (*SourceOp) ProtoMessage() {}
func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} }
func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} }
func (m *SourceOp) GetIdentifier() string {
if m != nil {
@ -620,7 +685,7 @@ type BuildOp struct {
func (m *BuildOp) Reset() { *m = BuildOp{} }
func (m *BuildOp) String() string { return proto.CompactTextString(m) }
func (*BuildOp) ProtoMessage() {}
func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} }
func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} }
func (m *BuildOp) GetInputs() map[string]*BuildInput {
if m != nil {
@ -651,7 +716,7 @@ type BuildInput struct {
func (m *BuildInput) Reset() { *m = BuildInput{} }
func (m *BuildInput) String() string { return proto.CompactTextString(m) }
func (*BuildInput) ProtoMessage() {}
func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} }
func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} }
// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time.
type OpMetadata struct {
@ -661,13 +726,14 @@ type OpMetadata struct {
Description map[string]string `protobuf:"bytes,2,rep,name=description" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// index 3 reserved for WorkerConstraint in previous versions
// WorkerConstraint worker_constraint = 3;
ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache" json:"export_cache,omitempty"`
ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache" json:"export_cache,omitempty"`
Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
func (m *OpMetadata) Reset() { *m = OpMetadata{} }
func (m *OpMetadata) String() string { return proto.CompactTextString(m) }
func (*OpMetadata) ProtoMessage() {}
func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} }
func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} }
func (m *OpMetadata) GetIgnoreCache() bool {
if m != nil {
@ -690,6 +756,13 @@ func (m *OpMetadata) GetExportCache() *ExportCache {
return nil
}
func (m *OpMetadata) GetCaps() map[github_com_moby_buildkit_util_apicaps.CapID]bool {
if m != nil {
return m.Caps
}
return nil
}
type ExportCache struct {
Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"`
}
@ -697,7 +770,7 @@ type ExportCache struct {
func (m *ExportCache) Reset() { *m = ExportCache{} }
func (m *ExportCache) String() string { return proto.CompactTextString(m) }
func (*ExportCache) ProtoMessage() {}
func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} }
func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} }
func (m *ExportCache) GetValue() bool {
if m != nil {
@ -716,7 +789,7 @@ type ProxyEnv struct {
func (m *ProxyEnv) Reset() { *m = ProxyEnv{} }
func (m *ProxyEnv) String() string { return proto.CompactTextString(m) }
func (*ProxyEnv) ProtoMessage() {}
func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} }
func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} }
func (m *ProxyEnv) GetHttpProxy() string {
if m != nil {
@ -754,7 +827,7 @@ type WorkerConstraints struct {
func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} }
func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) }
func (*WorkerConstraints) ProtoMessage() {}
func (*WorkerConstraints) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} }
func (*WorkerConstraints) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{16} }
func (m *WorkerConstraints) GetFilter() []string {
if m != nil {
@ -775,7 +848,7 @@ type Definition struct {
func (m *Definition) Reset() { *m = Definition{} }
func (m *Definition) String() string { return proto.CompactTextString(m) }
func (*Definition) ProtoMessage() {}
func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{16} }
func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{17} }
func (m *Definition) GetDef() [][]byte {
if m != nil {
@ -799,6 +872,7 @@ func init() {
proto.RegisterType((*Meta)(nil), "pb.Meta")
proto.RegisterType((*Mount)(nil), "pb.Mount")
proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt")
proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt")
proto.RegisterType((*CopyOp)(nil), "pb.CopyOp")
proto.RegisterType((*CopySource)(nil), "pb.CopySource")
proto.RegisterType((*SourceOp)(nil), "pb.SourceOp")
@ -1185,6 +1259,18 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
}
i += n10
}
if m.SecretOpt != nil {
dAtA[i] = 0xaa
i++
dAtA[i] = 0x1
i++
i = encodeVarintOps(dAtA, i, uint64(m.SecretOpt.Size()))
n11, err := m.SecretOpt.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n11
}
return i, nil
}
@ -1217,6 +1303,55 @@ func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *SecretOpt) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintOps(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if m.Uid != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintOps(dAtA, i, uint64(m.Uid))
}
if m.Gid != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintOps(dAtA, i, uint64(m.Gid))
}
if m.Mode != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintOps(dAtA, i, uint64(m.Mode))
}
if m.Optional {
dAtA[i] = 0x28
i++
if m.Optional {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
func (m *CopyOp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -1363,11 +1498,11 @@ func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintOps(dAtA, i, uint64(v.Size()))
n11, err := v.MarshalTo(dAtA[i:])
n12, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n11
i += n12
}
}
}
@ -1375,11 +1510,11 @@ func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintOps(dAtA, i, uint64(m.Def.Size()))
n12, err := m.Def.MarshalTo(dAtA[i:])
n13, err := m.Def.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n12
i += n13
}
if len(m.Attrs) > 0 {
for k, _ := range m.Attrs {
@ -1470,11 +1605,32 @@ func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size()))
n13, err := m.ExportCache.MarshalTo(dAtA[i:])
n14, err := m.ExportCache.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n13
i += n14
}
if len(m.Caps) > 0 {
for k, _ := range m.Caps {
dAtA[i] = 0x2a
i++
v := m.Caps[k]
mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1
i = encodeVarintOps(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintOps(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x10
i++
if v {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
}
return i, nil
}
@ -1624,11 +1780,11 @@ func (m *Definition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintOps(dAtA, i, uint64((&v).Size()))
n14, err := (&v).MarshalTo(dAtA[i:])
n15, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n14
i += n15
}
}
return i, nil
@ -1816,6 +1972,10 @@ func (m *Mount) Size() (n int) {
l = m.CacheOpt.Size()
n += 2 + l + sovOps(uint64(l))
}
if m.SecretOpt != nil {
l = m.SecretOpt.Size()
n += 2 + l + sovOps(uint64(l))
}
return n
}
@ -1832,6 +1992,28 @@ func (m *CacheOpt) Size() (n int) {
return n
}
func (m *SecretOpt) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovOps(uint64(l))
}
if m.Uid != 0 {
n += 1 + sovOps(uint64(m.Uid))
}
if m.Gid != 0 {
n += 1 + sovOps(uint64(m.Gid))
}
if m.Mode != 0 {
n += 1 + sovOps(uint64(m.Mode))
}
if m.Optional {
n += 2
}
return n
}
func (m *CopyOp) Size() (n int) {
var l int
_ = l
@ -1940,6 +2122,14 @@ func (m *OpMetadata) Size() (n int) {
l = m.ExportCache.Size()
n += 1 + l + sovOps(uint64(l))
}
if len(m.Caps) > 0 {
for k, v := range m.Caps {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1
n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize))
}
}
return n
}
@ -3098,6 +3288,39 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 21:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SecretOpt", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SecretOpt == nil {
m.SecretOpt = &SecretOpt{}
}
if err := m.SecretOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
@ -3217,6 +3440,162 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *SecretOpt) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SecretOpt: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SecretOpt: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType)
}
m.Uid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Uid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType)
}
m.Gid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Gid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
}
m.Mode = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Mode |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Optional = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CopyOp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -4234,6 +4613,115 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Caps", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Caps == nil {
m.Caps = make(map[github_com_moby_buildkit_util_apicaps.CapID]bool)
}
var mapkey github_com_moby_buildkit_util_apicaps.CapID
var mapvalue bool
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthOps
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = github_com_moby_buildkit_util_apicaps.CapID(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapvaluetemp int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapvaluetemp |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
mapvalue = bool(mapvaluetemp != 0)
} else {
iNdEx = entryPreIndex
skippy, err := skipOps(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
@ -4880,81 +5368,88 @@ var (
func init() { proto.RegisterFile("ops.proto", fileDescriptorOps) }
var fileDescriptorOps = []byte{
// 1203 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x8f, 0x1b, 0x45,
0x13, 0xde, 0x19, 0x7f, 0xcd, 0xd4, 0x6c, 0x36, 0x7e, 0x3b, 0x79, 0x83, 0x59, 0xc2, 0xae, 0x99,
0x20, 0xe4, 0x7c, 0xac, 0x57, 0x32, 0x52, 0x88, 0x38, 0x44, 0xac, 0x3f, 0xa2, 0x35, 0x21, 0x38,
0x6a, 0xaf, 0x96, 0x63, 0x34, 0x1e, 0xb7, 0xbd, 0xa3, 0x78, 0xa7, 0x47, 0x3d, 0xed, 0xb0, 0x3e,
0x80, 0x44, 0x7e, 0x01, 0x12, 0x12, 0x77, 0x7e, 0x08, 0xf7, 0x1c, 0xb9, 0xc2, 0x21, 0xa0, 0x20,
0xf1, 0x3b, 0x50, 0x75, 0xb7, 0x67, 0x66, 0x93, 0x20, 0x25, 0x82, 0x93, 0xbb, 0xab, 0x9e, 0x7a,
0xba, 0xea, 0xe9, 0x9a, 0x6a, 0x83, 0xcb, 0x93, 0xb4, 0x9d, 0x08, 0x2e, 0x39, 0xb1, 0x93, 0xc9,
0xf6, 0xde, 0x3c, 0x92, 0x27, 0xcb, 0x49, 0x3b, 0xe4, 0xa7, 0xfb, 0x73, 0x3e, 0xe7, 0xfb, 0xca,
0x35, 0x59, 0xce, 0xd4, 0x4e, 0x6d, 0xd4, 0x4a, 0x87, 0xf8, 0x3f, 0xd9, 0x60, 0x8f, 0x12, 0xf2,
0x01, 0x54, 0xa3, 0x38, 0x59, 0xca, 0xb4, 0x61, 0x35, 0x4b, 0x2d, 0xaf, 0xe3, 0xb6, 0x93, 0x49,
0x7b, 0x88, 0x16, 0x6a, 0x1c, 0xa4, 0x09, 0x65, 0x76, 0xc6, 0xc2, 0x86, 0xdd, 0xb4, 0x5a, 0x5e,
0x07, 0x10, 0x30, 0x38, 0x63, 0xe1, 0x28, 0x39, 0xdc, 0xa0, 0xca, 0x43, 0x3e, 0x82, 0x6a, 0xca,
0x97, 0x22, 0x64, 0x8d, 0x92, 0xc2, 0x6c, 0x22, 0x66, 0xac, 0x2c, 0x0a, 0x65, 0xbc, 0xc8, 0x14,
0xf2, 0x64, 0xd5, 0x28, 0xe7, 0x4c, 0x3d, 0x9e, 0xac, 0x34, 0x13, 0x7a, 0xc8, 0x35, 0xa8, 0x4c,
0x96, 0xd1, 0x62, 0xda, 0xa8, 0x28, 0x88, 0x87, 0x90, 0x2e, 0x1a, 0x14, 0x46, 0xfb, 0x48, 0x0b,
0x9c, 0x64, 0x11, 0xc8, 0x19, 0x17, 0xa7, 0x0d, 0xc8, 0x0f, 0x7c, 0x68, 0x6c, 0x34, 0xf3, 0x92,
0x4f, 0xc0, 0x0b, 0x79, 0x9c, 0x4a, 0x11, 0x44, 0xb1, 0x4c, 0x1b, 0x9e, 0x02, 0xff, 0x1f, 0xc1,
0x5f, 0x71, 0xf1, 0x98, 0x89, 0x5e, 0xee, 0xa4, 0x45, 0x64, 0xb7, 0x0c, 0x36, 0x4f, 0xfc, 0x1f,
0x2d, 0x70, 0xd6, 0xac, 0xc4, 0x87, 0xcd, 0x03, 0x11, 0x9e, 0x44, 0x92, 0x85, 0x72, 0x29, 0x58,
0xc3, 0x6a, 0x5a, 0x2d, 0x97, 0x9e, 0xb3, 0x91, 0x2d, 0xb0, 0x47, 0x63, 0x25, 0x94, 0x4b, 0xed,
0xd1, 0x98, 0x34, 0xa0, 0x76, 0x1c, 0x88, 0x28, 0x88, 0xa5, 0x52, 0xc6, 0xa5, 0xeb, 0x2d, 0xb9,
0x0a, 0xee, 0x68, 0x7c, 0xcc, 0x44, 0x1a, 0xf1, 0x58, 0xe9, 0xe1, 0xd2, 0xdc, 0x40, 0x76, 0x00,
0x46, 0xe3, 0x7b, 0x2c, 0x40, 0xd2, 0xb4, 0x51, 0x69, 0x96, 0x5a, 0x2e, 0x2d, 0x58, 0xfc, 0x6f,
0xa1, 0xa2, 0xee, 0x88, 0x7c, 0x0e, 0xd5, 0x69, 0x34, 0x67, 0xa9, 0xd4, 0xe9, 0x74, 0x3b, 0xcf,
0x9e, 0xef, 0x6e, 0xfc, 0xf6, 0x7c, 0xf7, 0x46, 0xa1, 0x19, 0x78, 0xc2, 0xe2, 0x90, 0xc7, 0x32,
0x88, 0x62, 0x26, 0xd2, 0xfd, 0x39, 0xdf, 0xd3, 0x21, 0xed, 0xbe, 0xfa, 0xa1, 0x86, 0x81, 0x5c,
0x87, 0x4a, 0x14, 0x4f, 0xd9, 0x99, 0xca, 0xbf, 0xd4, 0xbd, 0x64, 0xa8, 0xbc, 0xd1, 0x52, 0x26,
0x4b, 0x39, 0x44, 0x17, 0xd5, 0x08, 0x7f, 0x08, 0x55, 0xdd, 0x02, 0xe4, 0x2a, 0x94, 0x4f, 0x99,
0x0c, 0xd4, 0xf1, 0x5e, 0xc7, 0x41, 0x69, 0x1f, 0x30, 0x19, 0x50, 0x65, 0xc5, 0xee, 0x3a, 0xe5,
0x4b, 0x94, 0xde, 0xce, 0xbb, 0xeb, 0x01, 0x5a, 0xa8, 0x71, 0xf8, 0xdf, 0x40, 0x19, 0x03, 0x08,
0x81, 0x72, 0x20, 0xe6, 0xba, 0x0d, 0x5d, 0xaa, 0xd6, 0xa4, 0x0e, 0x25, 0x16, 0x3f, 0x51, 0xb1,
0x2e, 0xc5, 0x25, 0x5a, 0xc2, 0xaf, 0xa7, 0x46, 0x4c, 0x5c, 0x62, 0xdc, 0x32, 0x65, 0xc2, 0x68,
0xa8, 0xd6, 0xe4, 0x3a, 0xb8, 0x89, 0xe0, 0x67, 0xab, 0x47, 0x18, 0x5d, 0x29, 0x74, 0x08, 0x1a,
0x07, 0xf1, 0x13, 0xea, 0x24, 0x66, 0xe5, 0x7f, 0x67, 0x43, 0x45, 0x25, 0x44, 0x5a, 0x58, 0x7e,
0xb2, 0xd4, 0x4a, 0x96, 0xba, 0xc4, 0x94, 0x0f, 0x4a, 0xe8, 0xac, 0x7a, 0x14, 0x7d, 0x1b, 0x9c,
0x94, 0x2d, 0x58, 0x28, 0xb9, 0x30, 0x77, 0x9d, 0xed, 0x31, 0x9d, 0x29, 0x5e, 0x87, 0xce, 0x50,
0xad, 0xc9, 0x4d, 0xa8, 0x72, 0xa5, 0xa1, 0x4a, 0xf2, 0x1f, 0x94, 0x35, 0x10, 0x24, 0x17, 0x2c,
0x98, 0xf2, 0x78, 0xb1, 0x52, 0xa9, 0x3b, 0x34, 0xdb, 0x93, 0x9b, 0xe0, 0x2a, 0xd5, 0x8e, 0x56,
0x09, 0x6b, 0x54, 0x9b, 0x56, 0x6b, 0xab, 0x73, 0x21, 0x53, 0x14, 0x8d, 0x34, 0xf7, 0xe3, 0x57,
0x12, 0x06, 0xe1, 0x09, 0x1b, 0x25, 0xb2, 0x71, 0x39, 0xd7, 0xa0, 0x67, 0x6c, 0x34, 0xf3, 0xfa,
0x43, 0x70, 0xd6, 0x56, 0xec, 0xe0, 0x61, 0xdf, 0xf4, 0xb6, 0x3d, 0xec, 0x93, 0x3d, 0xa8, 0xa5,
0x27, 0x81, 0x88, 0xe2, 0xb9, 0x2a, 0x75, 0xab, 0x73, 0x29, 0x23, 0x19, 0x6b, 0x3b, 0x72, 0xad,
0x31, 0xfe, 0x5d, 0xa8, 0xea, 0x2f, 0x9a, 0x34, 0xa1, 0x94, 0x8a, 0xd0, 0x4c, 0x95, 0xad, 0xf5,
0xa7, 0xae, 0x87, 0x02, 0x45, 0x57, 0x26, 0x95, 0x9d, 0x4b, 0xe5, 0x53, 0x80, 0x1c, 0xf6, 0xdf,
0x5c, 0x89, 0xff, 0x83, 0x05, 0xce, 0x7a, 0x18, 0xe1, 0x97, 0x15, 0x4d, 0x59, 0x2c, 0xa3, 0x59,
0xc4, 0x84, 0xa9, 0xb3, 0x60, 0x21, 0x7b, 0x50, 0x09, 0xa4, 0x14, 0xeb, 0x86, 0x7d, 0xa7, 0x38,
0xc9, 0xda, 0x07, 0xe8, 0x19, 0xc4, 0x52, 0xac, 0xa8, 0x46, 0x6d, 0xdf, 0x01, 0xc8, 0x8d, 0xd8,
0x9d, 0x8f, 0xd9, 0xca, 0xb0, 0xe2, 0x92, 0x5c, 0x86, 0xca, 0x93, 0x60, 0xb1, 0x64, 0x26, 0x29,
0xbd, 0xf9, 0xd4, 0xbe, 0x63, 0xf9, 0x3f, 0xdb, 0x50, 0x33, 0x93, 0x8d, 0xdc, 0x82, 0x9a, 0x9a,
0x6c, 0x26, 0xa3, 0xd7, 0x57, 0xba, 0x86, 0x90, 0xfd, 0x6c, 0x64, 0x17, 0x72, 0x34, 0x54, 0x7a,
0x74, 0x9b, 0x1c, 0xf3, 0x01, 0x5e, 0x9a, 0xb2, 0x99, 0x99, 0xcd, 0xea, 0x2a, 0xfa, 0x6c, 0x16,
0xc5, 0x91, 0x8c, 0x78, 0x4c, 0xd1, 0x45, 0x6e, 0xad, 0xab, 0x2e, 0x2b, 0xc6, 0x2b, 0x45, 0xc6,
0x57, 0x8b, 0x1e, 0x82, 0x57, 0x38, 0xe6, 0x35, 0x55, 0x7f, 0x58, 0xac, 0xda, 0x1c, 0xa9, 0xe8,
0xf4, 0xc3, 0x92, 0xab, 0xf0, 0x2f, 0xf4, 0xbb, 0x0d, 0x90, 0x53, 0xbe, 0x79, 0xa7, 0xf8, 0x7f,
0x59, 0x00, 0xa3, 0x04, 0x47, 0xce, 0x34, 0x50, 0x13, 0x6a, 0x33, 0x9a, 0xc7, 0x5c, 0xb0, 0x47,
0xea, 0x73, 0x50, 0xf1, 0x0e, 0xf5, 0xb4, 0x4d, 0xb5, 0x39, 0x39, 0x00, 0x6f, 0xca, 0xd2, 0x50,
0x44, 0x09, 0x0a, 0x66, 0x44, 0xdf, 0xc5, 0x9a, 0x72, 0x9e, 0x76, 0x3f, 0x47, 0x68, 0xad, 0x8a,
0x31, 0xa4, 0x03, 0x9b, 0xec, 0x2c, 0xe1, 0x42, 0x9a, 0x53, 0xf4, 0x03, 0x78, 0x51, 0x3f, 0xa5,
0x68, 0x57, 0x27, 0x51, 0x8f, 0xe5, 0x9b, 0xed, 0xbb, 0x50, 0x7f, 0x99, 0xf4, 0xad, 0x04, 0xba,
0x06, 0x5e, 0x81, 0x1b, 0x81, 0xc7, 0x0a, 0xa8, 0x2b, 0xd4, 0x1b, 0xff, 0x29, 0xbe, 0x70, 0x66,
0x16, 0x92, 0xf7, 0x01, 0x4e, 0xa4, 0x4c, 0x1e, 0xa9, 0xe1, 0x68, 0x0e, 0x71, 0xd1, 0xa2, 0x10,
0x64, 0x17, 0x3c, 0xdc, 0xa4, 0xc6, 0xaf, 0x0f, 0x54, 0x11, 0xa9, 0x06, 0xbc, 0x07, 0xee, 0x2c,
0x0b, 0xd7, 0x03, 0xd0, 0x99, 0xad, 0xa3, 0xdf, 0x05, 0x27, 0xe6, 0xc6, 0xa7, 0x67, 0x75, 0x2d,
0xe6, 0xca, 0xe5, 0xdf, 0x84, 0xff, 0xbd, 0xf2, 0x1c, 0x93, 0x2b, 0x50, 0x9d, 0x45, 0x0b, 0xa9,
0x3e, 0x09, 0x1c, 0xff, 0x66, 0xe7, 0xff, 0x6a, 0x01, 0xe4, 0xed, 0x8b, 0x8a, 0x60, 0x6f, 0x23,
0x66, 0x53, 0xf7, 0xf2, 0x02, 0x9c, 0x53, 0x73, 0x2b, 0xe6, 0xae, 0xae, 0x9e, 0x6f, 0xf9, 0xf6,
0xfa, 0xd2, 0x94, 0xa6, 0xfa, 0xc9, 0x7c, 0xfa, 0xfb, 0x5b, 0x3d, 0x99, 0xd9, 0x09, 0xdb, 0xf7,
0xe1, 0xc2, 0x39, 0xba, 0x37, 0xfc, 0x1a, 0xf2, 0xce, 0x29, 0x5c, 0xd9, 0x8d, 0xcf, 0xc0, 0xcd,
0x46, 0x39, 0x71, 0xa0, 0xdc, 0x1d, 0x7e, 0xd9, 0xaf, 0x6f, 0x10, 0x80, 0xea, 0x78, 0xd0, 0xa3,
0x83, 0xa3, 0xba, 0x45, 0x6a, 0x50, 0x1a, 0x8f, 0x0f, 0xeb, 0x36, 0x71, 0xa1, 0xd2, 0x3b, 0xe8,
0x1d, 0x0e, 0xea, 0x25, 0x5c, 0x1e, 0x3d, 0x78, 0x78, 0x6f, 0x5c, 0x2f, 0xdf, 0xb8, 0x0d, 0x17,
0x5f, 0x9a, 0xcd, 0x2a, 0xfa, 0xf0, 0x80, 0x0e, 0x90, 0xc9, 0x83, 0xda, 0x43, 0x3a, 0x3c, 0x3e,
0x38, 0x1a, 0xd4, 0x2d, 0x74, 0x7c, 0x31, 0xea, 0xdd, 0x1f, 0xf4, 0xeb, 0x76, 0xb7, 0xfe, 0xec,
0xc5, 0x8e, 0xf5, 0xcb, 0x8b, 0x1d, 0xeb, 0x8f, 0x17, 0x3b, 0xd6, 0xf7, 0x7f, 0xee, 0x6c, 0x4c,
0xaa, 0xea, 0x6f, 0xe2, 0xc7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x42, 0x80, 0x11, 0x1b, 0x66,
0x0a, 0x00, 0x00,
// 1328 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcb, 0x6e, 0xdb, 0x46,
0x17, 0x36, 0xa9, 0x8b, 0xc9, 0x43, 0xdb, 0xd1, 0x3f, 0xb9, 0xfc, 0xfa, 0xfd, 0xa7, 0xb6, 0xcb,
0x14, 0x85, 0x13, 0xc7, 0x12, 0xa0, 0x00, 0x49, 0xd0, 0x45, 0x50, 0xeb, 0x12, 0x58, 0x4d, 0x53,
0x05, 0x23, 0xc3, 0x5d, 0x06, 0x14, 0x35, 0x92, 0x89, 0x48, 0x1c, 0x82, 0x1c, 0xa6, 0xd6, 0xa2,
0x5d, 0xe4, 0x09, 0x0a, 0x14, 0xe8, 0xbe, 0x2f, 0xd0, 0x37, 0xe8, 0x3e, 0xcb, 0x6e, 0xdb, 0x45,
0x5a, 0xa4, 0x2f, 0x52, 0x9c, 0x99, 0xe1, 0x25, 0x97, 0xa2, 0x09, 0xda, 0x15, 0x67, 0xce, 0xe5,
0x9b, 0x33, 0xdf, 0x39, 0x67, 0x0e, 0xc1, 0xe6, 0x51, 0xd2, 0x8a, 0x62, 0x2e, 0x38, 0x31, 0xa3,
0xc9, 0xf6, 0xe1, 0x3c, 0x10, 0x67, 0xe9, 0xa4, 0xe5, 0xf3, 0x65, 0x7b, 0xce, 0xe7, 0xbc, 0x2d,
0x55, 0x93, 0x74, 0x26, 0x77, 0x72, 0x23, 0x57, 0xca, 0xc5, 0xfd, 0xc1, 0x04, 0x73, 0x14, 0x91,
0x0f, 0xa1, 0x1e, 0x84, 0x51, 0x2a, 0x92, 0xa6, 0xb1, 0x57, 0xd9, 0x77, 0x3a, 0x76, 0x2b, 0x9a,
0xb4, 0x86, 0x28, 0xa1, 0x5a, 0x41, 0xf6, 0xa0, 0xca, 0xce, 0x99, 0xdf, 0x34, 0xf7, 0x8c, 0x7d,
0xa7, 0x03, 0x68, 0x30, 0x38, 0x67, 0xfe, 0x28, 0x3a, 0x5e, 0xa3, 0x52, 0x43, 0x3e, 0x86, 0x7a,
0xc2, 0xd3, 0xd8, 0x67, 0xcd, 0x8a, 0xb4, 0xd9, 0x40, 0x9b, 0xb1, 0x94, 0x48, 0x2b, 0xad, 0x45,
0x24, 0x9f, 0x47, 0xab, 0x66, 0xb5, 0x40, 0xea, 0xf1, 0x68, 0xa5, 0x90, 0x50, 0x43, 0xae, 0x41,
0x6d, 0x92, 0x06, 0x8b, 0x69, 0xb3, 0x26, 0x4d, 0x1c, 0x34, 0xe9, 0xa2, 0x40, 0xda, 0x28, 0x1d,
0xd9, 0x07, 0x2b, 0x5a, 0x78, 0x62, 0xc6, 0xe3, 0x65, 0x13, 0x8a, 0x03, 0x1f, 0x69, 0x19, 0xcd,
0xb5, 0xe4, 0x0e, 0x38, 0x3e, 0x0f, 0x13, 0x11, 0x7b, 0x41, 0x28, 0x92, 0xa6, 0x23, 0x8d, 0x2f,
0xa3, 0xf1, 0x97, 0x3c, 0x7e, 0xc2, 0xe2, 0x5e, 0xa1, 0xa4, 0x65, 0xcb, 0x6e, 0x15, 0x4c, 0x1e,
0xb9, 0xdf, 0x1b, 0x60, 0x65, 0xa8, 0xc4, 0x85, 0x8d, 0xa3, 0xd8, 0x3f, 0x0b, 0x04, 0xf3, 0x45,
0x1a, 0xb3, 0xa6, 0xb1, 0x67, 0xec, 0xdb, 0xf4, 0x15, 0x19, 0xd9, 0x02, 0x73, 0x34, 0x96, 0x44,
0xd9, 0xd4, 0x1c, 0x8d, 0x49, 0x13, 0xd6, 0x4f, 0xbd, 0x38, 0xf0, 0x42, 0x21, 0x99, 0xb1, 0x69,
0xb6, 0x25, 0x57, 0xc1, 0x1e, 0x8d, 0x4f, 0x59, 0x9c, 0x04, 0x3c, 0x94, 0x7c, 0xd8, 0xb4, 0x10,
0x90, 0x1d, 0x80, 0xd1, 0xf8, 0x3e, 0xf3, 0x10, 0x34, 0x69, 0xd6, 0xf6, 0x2a, 0xfb, 0x36, 0x2d,
0x49, 0xdc, 0x6f, 0xa0, 0x26, 0x73, 0x44, 0x3e, 0x83, 0xfa, 0x34, 0x98, 0xb3, 0x44, 0xa8, 0x70,
0xba, 0x9d, 0xe7, 0x2f, 0x76, 0xd7, 0x7e, 0x7d, 0xb1, 0x7b, 0xa3, 0x54, 0x0c, 0x3c, 0x62, 0xa1,
0xcf, 0x43, 0xe1, 0x05, 0x21, 0x8b, 0x93, 0xf6, 0x9c, 0x1f, 0x2a, 0x97, 0x56, 0x5f, 0x7e, 0xa8,
0x46, 0x20, 0xd7, 0xa1, 0x16, 0x84, 0x53, 0x76, 0x2e, 0xe3, 0xaf, 0x74, 0x2f, 0x6a, 0x28, 0x67,
0x94, 0x8a, 0x28, 0x15, 0x43, 0x54, 0x51, 0x65, 0xe1, 0x0e, 0xa1, 0xae, 0x4a, 0x80, 0x5c, 0x85,
0xea, 0x92, 0x09, 0x4f, 0x1e, 0xef, 0x74, 0x2c, 0xa4, 0xf6, 0x21, 0x13, 0x1e, 0x95, 0x52, 0xac,
0xae, 0x25, 0x4f, 0x91, 0x7a, 0xb3, 0xa8, 0xae, 0x87, 0x28, 0xa1, 0x5a, 0xe1, 0x7e, 0x0d, 0x55,
0x74, 0x20, 0x04, 0xaa, 0x5e, 0x3c, 0x57, 0x65, 0x68, 0x53, 0xb9, 0x26, 0x0d, 0xa8, 0xb0, 0xf0,
0xa9, 0xf4, 0xb5, 0x29, 0x2e, 0x51, 0xe2, 0x7f, 0x35, 0xd5, 0x64, 0xe2, 0x12, 0xfd, 0xd2, 0x84,
0xc5, 0x9a, 0x43, 0xb9, 0x26, 0xd7, 0xc1, 0x8e, 0x62, 0x7e, 0xbe, 0x7a, 0x8c, 0xde, 0xb5, 0x52,
0x85, 0xa0, 0x70, 0x10, 0x3e, 0xa5, 0x56, 0xa4, 0x57, 0xee, 0x8f, 0x26, 0xd4, 0x64, 0x40, 0x64,
0x1f, 0xaf, 0x1f, 0xa5, 0x8a, 0xc9, 0x4a, 0x97, 0xe8, 0xeb, 0x83, 0x24, 0x3a, 0xbf, 0x3d, 0x92,
0xbe, 0x0d, 0x56, 0xc2, 0x16, 0xcc, 0x17, 0x3c, 0xd6, 0xb9, 0xce, 0xf7, 0x18, 0xce, 0x14, 0xd3,
0xa1, 0x22, 0x94, 0x6b, 0x72, 0x00, 0x75, 0x2e, 0x39, 0x94, 0x41, 0xfe, 0x05, 0xb3, 0xda, 0x04,
0xc1, 0x63, 0xe6, 0x4d, 0x79, 0xb8, 0x58, 0xc9, 0xd0, 0x2d, 0x9a, 0xef, 0xc9, 0x01, 0xd8, 0x92,
0xb5, 0x93, 0x55, 0xc4, 0x9a, 0xf5, 0x3d, 0x63, 0x7f, 0xab, 0xb3, 0x99, 0x33, 0x8a, 0x42, 0x5a,
0xe8, 0xb1, 0x4b, 0x7c, 0xcf, 0x3f, 0x63, 0xa3, 0x48, 0x34, 0x2f, 0x15, 0x1c, 0xf4, 0xb4, 0x8c,
0xe6, 0x5a, 0x84, 0x4d, 0x98, 0x1f, 0x33, 0x81, 0xa6, 0x97, 0xa5, 0xa9, 0x84, 0x1d, 0x67, 0x42,
0x5a, 0xe8, 0xdd, 0x21, 0x58, 0x19, 0x04, 0x96, 0xfb, 0xb0, 0xaf, 0x1b, 0xc1, 0x1c, 0xf6, 0xc9,
0x21, 0xac, 0x27, 0x67, 0x5e, 0x1c, 0x84, 0x73, 0xc9, 0xcb, 0x56, 0xe7, 0x62, 0x7e, 0xe2, 0x58,
0xc9, 0x11, 0x2c, 0xb3, 0x71, 0x39, 0xd8, 0xf9, 0x11, 0x6f, 0x60, 0x35, 0xa0, 0x92, 0x06, 0x53,
0x89, 0xb3, 0x49, 0x71, 0x89, 0x92, 0x79, 0xa0, 0x72, 0xbf, 0x49, 0x71, 0x89, 0x64, 0x2f, 0xf9,
0x94, 0x49, 0x5a, 0x37, 0xa9, 0x5c, 0x23, 0x7f, 0x3c, 0x12, 0x01, 0x0f, 0xbd, 0x45, 0xc6, 0x5f,
0xb6, 0x77, 0xef, 0x41, 0x5d, 0xbd, 0x37, 0x64, 0x0f, 0x2a, 0x49, 0xec, 0xeb, 0x37, 0x6f, 0x2b,
0x7b, 0x88, 0xd4, 0x93, 0x45, 0x51, 0x95, 0x27, 0xd2, 0x2c, 0x12, 0xe9, 0x52, 0x80, 0xc2, 0xec,
0xdf, 0x29, 0x18, 0xf7, 0x3b, 0x03, 0xac, 0xec, 0xa9, 0xc4, 0xbe, 0x0f, 0xa6, 0x2c, 0x14, 0xc1,
0x2c, 0x60, 0xb1, 0x26, 0xa3, 0x24, 0x21, 0x87, 0x50, 0xf3, 0x84, 0x88, 0xb3, 0x76, 0xfa, 0x6f,
0xf9, 0x9d, 0x6d, 0x1d, 0xa1, 0x66, 0x10, 0x8a, 0x78, 0x45, 0x95, 0xd5, 0xf6, 0x5d, 0x80, 0x42,
0x88, 0xfc, 0x3d, 0x61, 0x2b, 0x8d, 0x8a, 0x4b, 0x72, 0x09, 0x6a, 0x4f, 0xbd, 0x45, 0xca, 0x74,
0x50, 0x6a, 0xf3, 0x89, 0x79, 0xd7, 0x70, 0x7f, 0x32, 0x61, 0x5d, 0xbf, 0xbb, 0xe4, 0x26, 0xac,
0xcb, 0x77, 0x57, 0x47, 0xf4, 0xf6, 0x9b, 0x66, 0x26, 0xa4, 0x9d, 0x0f, 0x94, 0x52, 0x8c, 0x1a,
0x4a, 0x0d, 0x16, 0x1d, 0x63, 0x31, 0x5e, 0x2a, 0x53, 0x36, 0xd3, 0x93, 0x43, 0xa6, 0xa2, 0xcf,
0x66, 0x41, 0x18, 0x60, 0xce, 0x28, 0xaa, 0xc8, 0xcd, 0xec, 0xd6, 0x55, 0x89, 0x78, 0xa5, 0x8c,
0xf8, 0xe6, 0xa5, 0x87, 0xe0, 0x94, 0x8e, 0x79, 0xcb, 0xad, 0x3f, 0x2a, 0xdf, 0x5a, 0x1f, 0x29,
0xe1, 0xd4, 0xd8, 0x2b, 0x58, 0xf8, 0x07, 0xfc, 0xdd, 0x06, 0x28, 0x20, 0xdf, 0xbd, 0x52, 0xdc,
0x67, 0x15, 0x80, 0x51, 0x84, 0x0f, 0xe2, 0xd4, 0x93, 0xef, 0xe7, 0x46, 0x30, 0x0f, 0x79, 0xcc,
0x1e, 0xcb, 0x66, 0x95, 0xfe, 0x16, 0x75, 0x94, 0x4c, 0xf6, 0x15, 0x39, 0x02, 0x67, 0xca, 0x12,
0x3f, 0x0e, 0x64, 0x91, 0x6b, 0xd2, 0x77, 0xf1, 0x4e, 0x05, 0x4e, 0xab, 0x5f, 0x58, 0x28, 0xae,
0xca, 0x3e, 0xa4, 0x03, 0x1b, 0xec, 0x3c, 0xe2, 0xb1, 0xd0, 0xa7, 0xa8, 0xf1, 0x7c, 0x41, 0x0d,
0x7a, 0x94, 0xcb, 0x93, 0xa8, 0xc3, 0x8a, 0x0d, 0xf1, 0xa0, 0xea, 0x7b, 0x91, 0x9a, 0x4d, 0x4e,
0xa7, 0xf9, 0xda, 0x79, 0x3d, 0x2f, 0x52, 0xa4, 0x75, 0x6f, 0xe1, 0x5d, 0x9f, 0xfd, 0xb6, 0x7b,
0x50, 0x1a, 0x48, 0x4b, 0x3e, 0x59, 0xb5, 0x65, 0xbd, 0x3c, 0x09, 0x44, 0x3b, 0x15, 0xc1, 0xa2,
0xed, 0x45, 0x01, 0xc2, 0xa1, 0xe3, 0xb0, 0x4f, 0x25, 0xf4, 0xf6, 0x3d, 0x68, 0xbc, 0x1e, 0xf7,
0xfb, 0xe4, 0x60, 0xfb, 0x0e, 0xd8, 0x79, 0x1c, 0x7f, 0xe7, 0x68, 0x95, 0x93, 0x77, 0x0d, 0x9c,
0xd2, 0xbd, 0xd1, 0xf0, 0x54, 0x1a, 0x2a, 0xf6, 0xd5, 0xc6, 0x7d, 0x86, 0xff, 0x06, 0x7a, 0x8a,
0x90, 0x0f, 0x00, 0xce, 0x84, 0x88, 0x1e, 0xcb, 0xb1, 0xa2, 0x0f, 0xb1, 0x51, 0x22, 0x2d, 0xc8,
0x2e, 0x38, 0xb8, 0x49, 0xb4, 0x5e, 0x45, 0x2a, 0x3d, 0x12, 0x65, 0xf0, 0x7f, 0xb0, 0x67, 0xb9,
0xbb, 0x1a, 0x1d, 0xd6, 0x2c, 0xf3, 0xfe, 0x1f, 0x58, 0x21, 0xd7, 0x3a, 0x35, 0xe5, 0xd6, 0x43,
0x2e, 0x55, 0xee, 0x01, 0xfc, 0xe7, 0x8d, 0x1f, 0x19, 0x72, 0x05, 0xea, 0xb3, 0x60, 0x21, 0x64,
0xbb, 0xe2, 0xe0, 0xd4, 0x3b, 0xf7, 0x17, 0x03, 0xa0, 0x68, 0x2d, 0x64, 0x04, 0xfb, 0x0e, 0x6d,
0x36, 0x54, 0x9f, 0x2d, 0xc0, 0x5a, 0xea, 0x0c, 0xea, 0x3a, 0xba, 0xfa, 0x6a, 0x3b, 0xb6, 0xb2,
0x04, 0xab, 0xdc, 0x76, 0x74, 0x6e, 0xdf, 0xe7, 0x67, 0x23, 0x3f, 0x61, 0xfb, 0x01, 0x6c, 0xbe,
0x02, 0xf7, 0x8e, 0x9d, 0x5a, 0x54, 0x59, 0x29, 0x65, 0x37, 0x3e, 0x05, 0x3b, 0x1f, 0x82, 0xc4,
0x82, 0x6a, 0x77, 0xf8, 0x45, 0xbf, 0xb1, 0x46, 0x00, 0xea, 0xe3, 0x41, 0x8f, 0x0e, 0x4e, 0x1a,
0x06, 0x59, 0x87, 0xca, 0x78, 0x7c, 0xdc, 0x30, 0x89, 0x0d, 0xb5, 0xde, 0x51, 0xef, 0x78, 0xd0,
0xa8, 0xe0, 0xf2, 0xe4, 0xe1, 0xa3, 0xfb, 0xe3, 0x46, 0xf5, 0xc6, 0x6d, 0xb8, 0xf0, 0xda, 0xa0,
0x92, 0xde, 0xc7, 0x47, 0x74, 0x80, 0x48, 0x0e, 0xac, 0x3f, 0xa2, 0xc3, 0xd3, 0xa3, 0x93, 0x41,
0xc3, 0x40, 0xc5, 0xe7, 0xa3, 0xde, 0x83, 0x41, 0xbf, 0x61, 0x76, 0x1b, 0xcf, 0x5f, 0xee, 0x18,
0x3f, 0xbf, 0xdc, 0x31, 0x7e, 0x7f, 0xb9, 0x63, 0x7c, 0xfb, 0xc7, 0xce, 0xda, 0xa4, 0x2e, 0x7f,
0xb0, 0x6f, 0xfd, 0x19, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xba, 0x5c, 0x6e, 0xa0, 0x0b, 0x00, 0x00,
}

View file

@ -63,6 +63,7 @@ message Mount {
bool readonly = 5;
MountType mountType = 6;
CacheOpt cacheOpt = 20;
SecretOpt secretOpt = 21;
}
// MountType defines a type of a mount from a supported set
@ -92,6 +93,21 @@ enum CacheSharingOpt {
LOCKED = 2;
}
// SecretOpt defines options describing secret mounts
message SecretOpt {
// ID of secret. Used for querying the value.
string ID = 1;
// UID of secret file
uint32 uid = 2;
// GID of secret file
uint32 gid = 3;
// Mode is the filesystem mode of secret file
uint32 mode = 4;
// Optional defines whether the secret value is required. An error is
// produced if the value is not found and optional is false.
bool optional = 5;
}
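As a rough usage sketch (assuming the generated Go types are importable as github.com/moby/buildkit/solver/pb, as elsewhere in this change set), a secret mount option can be built and serialized with the generated Marshal shown above; the ID, Uid, Gid, Mode and Optional fields mirror the proto definition, while the concrete values here are made up.

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	so := &pb.SecretOpt{
		ID:       "mysecret", // key used to look up the secret value
		Uid:      0,
		Gid:      0,
		Mode:     0400,  // read-only for the owner
		Optional: false, // a missing secret is an error
	}
	dt, err := so.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("SecretOpt encodes to %d bytes\n", len(dt))
}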
// CopyOp copies files across Ops.
message CopyOp {
repeated CopySource src = 1;
@ -137,6 +153,8 @@ message OpMetadata {
// index 3 reserved for WorkerConstraint in previous versions
// WorkerConstraint worker_constraint = 3;
ExportCache export_cache = 4;
map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false];
}
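The new caps map lets an op metadata entry declare which API capabilities it depends on; the gogoproto castkey option makes the generated Go map key an apicaps.CapID. A minimal sketch follows; only the types come from the generated code, and the capability ID string is hypothetical.

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
)

func main() {
	md := pb.OpMetadata{
		Caps: map[apicaps.CapID]bool{
			apicaps.CapID("example.capability"): true, // hypothetical capability ID
		},
	}
	fmt.Println(len(md.Caps))
}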
message ExportCache {

View file

@ -58,16 +58,16 @@ func (r *splitResult) Release(ctx context.Context) error {
}
// NewCachedResult combines a result and cache key into cached result
func NewCachedResult(res Result, k ExportableCacheKey) CachedResult {
func NewCachedResult(res Result, k []ExportableCacheKey) CachedResult {
return &cachedResult{res, k}
}
type cachedResult struct {
Result
k ExportableCacheKey
k []ExportableCacheKey
}
func (cr *cachedResult) CacheKey() ExportableCacheKey {
func (cr *cachedResult) CacheKeys() []ExportableCacheKey {
return cr.k
}
@ -95,8 +95,8 @@ func (r *clonedCachedResult) ID() string {
return r.Result.ID()
}
func (cr *clonedCachedResult) CacheKey() ExportableCacheKey {
return cr.cr.CacheKey()
func (cr *clonedCachedResult) CacheKeys() []ExportableCacheKey {
return cr.cr.CacheKeys()
}
type SharedCachedResult struct {

View file

@ -306,7 +306,9 @@ func (s *scheduler) mergeTo(target, src *edge) bool {
target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: *d.slowCacheKey}})
}
if d.result != nil {
target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: d.result.CacheKey(), Selector: src.cacheMap.Deps[i].Selector}})
for _, dk := range d.result.CacheKeys() {
target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: dk, Selector: src.cacheMap.Deps[i].Selector}})
}
}
}

View file

@ -50,7 +50,7 @@ type Result interface {
// CachedResult is a result connected with its cache key
type CachedResult interface {
Result
CacheKey() ExportableCacheKey
CacheKeys() []ExportableCacheKey
}
// CacheExportMode is the type for setting cache exporting modes

View file

@ -15,6 +15,7 @@ import (
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/source"
@ -263,7 +264,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRe
}
}
checkoutRef, err := gs.cache.New(ctx, nil, cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
checkoutRef, err := gs.cache.New(ctx, nil, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote)
}

View file

@ -6,6 +6,7 @@ import (
"strings"
"github.com/containerd/containerd/reference"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
@ -17,6 +18,14 @@ var (
errNotFound = errors.New("not found")
)
type ResolveMode int
const (
ResolveModeDefault ResolveMode = iota
ResolveModeForcePull
ResolveModePreferLocal
)
const (
DockerImageScheme = "docker-image"
GitScheme = "git"
@ -51,18 +60,38 @@ func FromString(s string) (Identifier, error) {
return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0])
}
}
func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) {
id, err := FromString(op.Source.Identifier)
if err != nil {
return nil, err
}
if id, ok := id.(*ImageIdentifier); ok && platform != nil {
id.Platform = &specs.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
if id, ok := id.(*ImageIdentifier); ok {
if platform != nil {
id.Platform = &specs.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
}
}
for k, v := range op.Source.Attrs {
switch k {
case pb.AttrImageResolveMode:
rm, err := ParseImageResolveMode(v)
if err != nil {
return nil, err
}
id.ResolveMode = rm
case pb.AttrImageRecordType:
rt, err := parseImageRecordType(v)
if err != nil {
return nil, err
}
id.RecordType = rt
}
}
}
if id, ok := id.(*GitIdentifier); ok {
@ -145,8 +174,10 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) {
}
type ImageIdentifier struct {
Reference reference.Spec
Platform *specs.Platform
Reference reference.Spec
Platform *specs.Platform
ResolveMode ResolveMode
RecordType client.UsageRecordType
}
func NewImageIdentifier(str string) (*ImageIdentifier, error) {
@ -203,3 +234,29 @@ type HttpIdentifier struct {
func (_ *HttpIdentifier) ID() string {
return HttpsScheme
}
func ParseImageResolveMode(v string) (ResolveMode, error) {
switch v {
case pb.AttrImageResolveModeDefault, "":
return ResolveModeDefault, nil
case pb.AttrImageResolveModeForcePull:
return ResolveModeForcePull, nil
case pb.AttrImageResolveModePreferLocal:
return ResolveModePreferLocal, nil
default:
return 0, errors.Errorf("invalid resolvemode: %s", v)
}
}
func parseImageRecordType(v string) (client.UsageRecordType, error) {
switch client.UsageRecordType(v) {
case "", client.UsageRecordTypeRegular:
return client.UsageRecordTypeRegular, nil
case client.UsageRecordTypeInternal:
return client.UsageRecordTypeInternal, nil
case client.UsageRecordTypeFrontend:
return client.UsageRecordTypeFrontend, nil
default:
return "", errors.Errorf("invalid record type %s", v)
}
}
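A short sketch of how these helpers are driven: the resolve mode and record type arrive as string attributes on the source op (the pb.AttrImage* constants referenced above) and are parsed into typed values by ParseImageResolveMode and parseImageRecordType. The attribute constant below is taken from the code above; the rest is illustrative.

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/source"
)

func main() {
	// Value as it would appear in op.Source.Attrs[pb.AttrImageResolveMode].
	mode, err := source.ParseImageResolveMode(pb.AttrImageResolveModeForcePull)
	if err != nil {
		panic(err)
	}
	fmt.Println(mode == source.ResolveModeForcePull) // true
}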

View file

@ -10,6 +10,7 @@ import (
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/contenthash"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/snapshot"
@ -20,6 +21,8 @@ import (
"github.com/sirupsen/logrus"
"github.com/tonistiigi/fsutil"
"golang.org/x/time/rate"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const keySharedKey = "local.sharedKey"
@ -80,7 +83,8 @@ func (ls *localSourceHandler) CacheKey(ctx context.Context, index int) (string,
SessionID string
IncludePatterns []string
ExcludePatterns []string
}{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns})
FollowPaths []string
}{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths})
if err != nil {
return "", false, err
}
@ -118,7 +122,7 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable
}
if mutable == nil {
m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name)))
m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name)))
if err != nil {
return nil, err
}
@ -167,6 +171,9 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable
}
if err := filesync.FSSync(ctx, caller, opt); err != nil {
if status.Code(err) == codes.NotFound {
return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name)
}
return nil, err
}

View file

@ -87,6 +87,7 @@ func (l *CapList) CapSet(caps []pb.APICap) CapSet {
m := make(map[string]*pb.APICap, len(caps))
for _, c := range caps {
if c.ID != "" {
c := c // capture loop iterator
m[c.ID] = &c
}
}
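The added `c := c` line works around the Go range-variable pitfall (loop variables were shared across iterations before Go 1.22): without the re-declaration, every map value would point at the same variable. The program below is illustrative only and assumes APICap's ID field is a plain string.

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	caps := []pb.APICap{{ID: "a"}, {ID: "b"}}
	m := map[string]*pb.APICap{}
	for _, c := range caps {
		m[c.ID] = &c // without a fresh `c := c`, both entries alias the loop variable
	}
	// On Go versions before 1.22, both lookups print "b".
	fmt.Println(m["a"].ID, m["b"].ID)
}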

View file

@ -4,9 +4,9 @@ github.com/pkg/errors v0.8.0
github.com/stretchr/testify v1.1.4
github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2
github.com/containerd/containerd b41633746ed4833f52c3c071e8edcfa2713e5677
github.com/containerd/containerd a88b6319614de846458750ff882723479ca7b1a1
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/sirupsen/logrus v1.0.0
@ -39,7 +39,7 @@ golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
github.com/tonistiigi/fsutil b19464cd1b6a00773b4f2eb7acf9c30426f9df42
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b

100
vendor/github.com/moby/buildkit/worker/cacheresult.go generated vendored Normal file
View file

@ -0,0 +1,100 @@
package worker
import (
"context"
"strings"
"time"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/solver"
"github.com/pkg/errors"
)
func NewCacheResultStorage(wc *Controller) solver.CacheResultStorage {
return &cacheResultStorage{
wc: wc,
}
}
type cacheResultStorage struct {
wc *Controller
}
func (s *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) {
ref, ok := res.Sys().(*WorkerRef)
if !ok {
return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys())
}
if ref.ImmutableRef != nil {
if !cache.HasCachePolicyRetain(ref.ImmutableRef) {
if err := cache.CachePolicyRetain(ref.ImmutableRef); err != nil {
return solver.CacheResult{}, err
}
ref.ImmutableRef.Metadata().Commit()
}
}
return solver.CacheResult{ID: ref.ID(), CreatedAt: time.Now()}, nil
}
func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
return s.load(res.ID)
}
func (s *cacheResultStorage) getWorkerRef(id string) (Worker, string, error) {
workerID, refID, err := parseWorkerRef(id)
if err != nil {
return nil, "", err
}
w, err := s.wc.Get(workerID)
if err != nil {
return nil, "", err
}
return w, refID, nil
}
func (s *cacheResultStorage) load(id string) (solver.Result, error) {
w, refID, err := s.getWorkerRef(id)
if err != nil {
return nil, err
}
if refID == "" {
return NewWorkerRefResult(nil, w), nil
}
ref, err := w.LoadRef(refID)
if err != nil {
return nil, err
}
return NewWorkerRefResult(ref, w), nil
}
func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
w, refID, err := s.getWorkerRef(res.ID)
if err != nil {
return nil, err
}
ref, err := w.LoadRef(refID)
if err != nil {
return nil, err
}
defer ref.Release(context.TODO())
remote, err := w.GetRemote(ctx, ref, false)
if err != nil {
return nil, nil // ignore error. loadRemote is best effort
}
return remote, nil
}
func (s *cacheResultStorage) Exists(id string) bool {
ref, err := s.load(id)
if err != nil {
return false
}
ref.Release(context.TODO())
return true
}
func parseWorkerRef(id string) (string, string, error) {
parts := strings.Split(id, "::")
if len(parts) != 2 {
return "", "", errors.Errorf("invalid workerref id: %s", id)
}
return parts[0], parts[1], nil
}
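getWorkerRef expects cache result IDs of the form "<workerID>::<refID>", which parseWorkerRef splits on the "::" separator. The values below are hypothetical; this is not buildkit code, just an illustration of the format.

package main

import (
	"fmt"
	"strings"
)

func main() {
	id := "worker0::sha256:0123abcd" // hypothetical worker and ref IDs
	parts := strings.Split(id, "::")
	fmt.Println("worker:", parts[0], "ref:", parts[1])
}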

View file

@ -9,6 +9,7 @@ import (
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
@ -22,12 +23,12 @@ type Worker interface {
LoadRef(id string) (cache.ImmutableRef, error)
// ResolveOp resolves Vertex.Sys() to Op implementation.
ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error)
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
// Exec is similar to executor.Exec but without []mount.Mount
Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error)
Exporter(name string) (exporter.Exporter, error)
Prune(ctx context.Context, ch chan client.UsageInfo) error
Prune(ctx context.Context, ch chan client.UsageInfo, opt client.PruneInfo) error
GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error)
FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error)
}

View file

@ -174,7 +174,7 @@ func sameFile(f1, f2 *currentPath) (same bool, retErr error) {
if !ok {
return false, nil
}
ls2, ok := f1.f.Sys().(*Stat)
ls2, ok := f2.f.Sys().(*Stat)
if !ok {
return false, nil
}

View file

@ -175,6 +175,11 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
}
if rename {
if oldFi.IsDir() != fi.IsDir() {
if err := os.RemoveAll(destPath); err != nil {
return errors.Wrapf(err, "failed to remove %s", destPath)
}
}
if err := os.Rename(newPath, destPath); err != nil {
return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath)
}

71
vendor/github.com/tonistiigi/fsutil/fs.go generated vendored Normal file
View file

@ -0,0 +1,71 @@
package fsutil
import (
"context"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/pkg/errors"
)
type FS interface {
Walk(context.Context, filepath.WalkFunc) error
Open(string) (io.ReadCloser, error)
}
func NewFS(root string, opt *WalkOpt) FS {
return &fs{
root: root,
opt: opt,
}
}
type fs struct {
root string
opt *WalkOpt
}
func (fs *fs) Walk(ctx context.Context, fn filepath.WalkFunc) error {
return Walk(ctx, fs.root, fs.opt, fn)
}
func (fs *fs) Open(p string) (io.ReadCloser, error) {
return os.Open(filepath.Join(fs.root, p))
}
func SubDirFS(fs FS, stat Stat) FS {
return &subDirFS{fs: fs, stat: stat}
}
type subDirFS struct {
fs FS
stat Stat
}
func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
main := &StatInfo{Stat: &fs.stat}
if !main.IsDir() {
return errors.Errorf("fs subdir not mode directory")
}
if main.Name() != fs.stat.Path {
return errors.Errorf("subdir path must be single file")
}
if err := fn(fs.stat.Path, main, nil); err != nil {
return err
}
return fs.fs.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
stat, ok := fi.Sys().(*Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
}
stat.Path = path.Join(fs.stat.Path, stat.Path)
return fn(filepath.Join(fs.stat.Path, p), &StatInfo{stat}, nil)
})
}
func (fs *subDirFS) Open(p string) (io.ReadCloser, error) {
return fs.fs.Open(strings.TrimPrefix(p, fs.stat.Path+"/"))
}
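A rough usage sketch for the new FS abstraction (the root path and options here are made up): NewFS wraps a root directory plus a WalkOpt, and the resulting FS can be walked directly or handed to filesync.Send in place of the old root/opt pair, as the Send signature change further below shows.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/tonistiigi/fsutil"
)

func main() {
	fs := fsutil.NewFS("/tmp/build-context", &fsutil.WalkOpt{}) // hypothetical root
	err := fs.Walk(context.TODO(), func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(p, fi.Mode())
		return nil
	})
	if err != nil {
		panic(err)
	}
}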

View file

@ -4,7 +4,6 @@ import (
"context"
"io"
"os"
"path/filepath"
"sync"
"github.com/pkg/errors"
@ -23,11 +22,10 @@ type Stream interface {
Context() context.Context
}
func Send(ctx context.Context, conn Stream, root string, opt *WalkOpt, progressCb func(int, bool)) error {
func Send(ctx context.Context, conn Stream, fs FS, progressCb func(int, bool)) error {
s := &sender{
conn: &syncStream{Stream: conn},
root: root,
opt: opt,
fs: fs,
files: make(map[uint32]string),
progressCb: progressCb,
sendpipeline: make(chan *sendHandle, 128),
@ -42,8 +40,7 @@ type sendHandle struct {
type sender struct {
conn Stream
opt *WalkOpt
root string
fs FS
files map[uint32]string
mu sync.RWMutex
progressCb func(int, bool)
@ -130,7 +127,7 @@ func (s *sender) queue(id uint32) error {
}
func (s *sender) sendFile(h *sendHandle) error {
f, err := os.Open(filepath.Join(s.root, h.path))
f, err := s.fs.Open(h.path)
if err == nil {
defer f.Close()
buf := bufPool.Get().([]byte)
@ -144,7 +141,7 @@ func (s *sender) sendFile(h *sendHandle) error {
func (s *sender) walk(ctx context.Context) error {
var i uint32 = 0
err := Walk(ctx, s.root, s.opt, func(path string, fi os.FileInfo, err error) error {
err := s.fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}