
vendor: update buildkit

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 7 years ago
parent
commit
ed6fd3d95b

+ 2 - 2
vendor.conf

@@ -2,7 +2,6 @@
 github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
 github.com/Microsoft/hcsshim v0.6.5
 github.com/Microsoft/go-winio v0.4.5
-github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
@@ -28,6 +27,8 @@ github.com/imdario/mergo 0.2.1
 golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
 
 github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/moby/buildkit c2dbdeb457ea665699a5d97f79eebfac4ab4726f https://github.com/tonistiigi/buildkit.git
+github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203
 
 #get libnetwork packages
 github.com/docker/libnetwork 60e002dd61885e1cd909582f00f7eb4da634518a
@@ -107,7 +108,6 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
 
 # cluster
 github.com/docker/swarmkit bd7bafb8a61de1f5f23c8215ce7b9ecbcb30ff21

+ 53 - 7
vendor/github.com/moby/buildkit/README.md

@@ -1,10 +1,16 @@
-### Important: This repository is in an early development phase and not suitable for practical workloads. It does not compare with `docker build` features yet.
+### Important: This repository is in an early development phase
 
 [![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)
 
 
 ## BuildKit
 
+<!-- godoc is mainly for LLB stuff -->
+[![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
+[![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit)
+[![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit)
+
+
 BuildKit is a toolkit for converting source code to build artifacts in an efficient, expressive and repeatable manner.
 
 Key features:
@@ -23,7 +29,7 @@ Read the proposal from https://github.com/moby/moby/issues/32925
 
 #### Quick start
 
-BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for running the gRPC API. 
+BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for serving the gRPC API. 
 
 ```bash
 # buildd daemon (choose one)
@@ -36,17 +42,15 @@ go build -o buildctl ./cmd/buildctl
 
 You can also use `make binaries` that prepares all binaries into the `bin/` directory.
 
-The first thing to test could be to try building BuildKit with BuildKit. BuildKit provides a low-level solver format that could be used by multiple build definitions. Preparation work for making the Dockerfile parser reusable as a frontend is tracked in https://github.com/moby/moby/pull/33492. As no frontends have been integrated yet we currently have to use a client library to generate this low-level definition.
-
 `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit and its dependencies using the `client` package. Running one of these script generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
 
-You can use `buildctl debug dump-llb` to see what data is this definition.
+You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout.
 
 ```bash
 go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq .
 ```
 
-To start building use `buildctl build` command. The script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
+To start building use `buildctl build` command. The example script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
 
 ```bash
 go run examples/buildkit0/buildkit.go | buildctl build
@@ -59,10 +63,52 @@ Different versions of the example scripts show different ways of describing the
 - `./examples/buildkit0` - uses only exec operations, defines a full stage per component.
 - `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency.
 - `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching.
+- `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path`  
+- `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes
+- `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
+
+
+#### Examples
+
+##### Starting the buildd daemon:
+
+```
+buildd-standalone --debug --root /var/lib/buildkit
+```
+
+##### Building a Dockerfile:
+
+```
+buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
+```
+
+`context` and `dockerfile` should point to local directories for build context and Dockerfile location.
+
+
+##### Exporting resulting image to containerd
+
+Containerd version of buildd needs to be used
+
+```
+buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image
+ctr --namespace=buildkit images ls
+```
+
+##### Exporting build result back to client
+
+```
+buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir
+```
+
+#### View build cache
+
+```
+buildctl du -v
+```
 
 #### Supported runc version
 
-During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/3707703a694187c7d08e2f333da6ddd58bcb729d/RUNC.md) for more information.
+During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/d1e11f17ec7b325f89608dd46c128300b8727d50/RUNC.md) for more information.
 
 
 #### Contributing

+ 22 - 0
vendor/github.com/moby/buildkit/session/context.go

@@ -0,0 +1,22 @@
+package session
+
+import "context"
+
+type contextKeyT string
+
+var contextKey = contextKeyT("buildkit/session-id")
+
+func NewContext(ctx context.Context, id string) context.Context {
+	if id != "" {
+		return context.WithValue(ctx, contextKey, id)
+	}
+	return ctx
+}
+
+func FromContext(ctx context.Context) string {
+	v := ctx.Value(contextKey)
+	if v == nil {
+		return ""
+	}
+	return v.(string)
+}

+ 30 - 6
vendor/github.com/moby/buildkit/session/filesync/diffcopy.go

@@ -1,31 +1,55 @@
 package filesync
 
 import (
+	"os"
 	"time"
 	"time"
 
 
-	"google.golang.org/grpc"
-
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 	"github.com/tonistiigi/fsutil"
 	"github.com/tonistiigi/fsutil"
+	"google.golang.org/grpc"
 )
 
-func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
+func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
 	return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
 		ExcludePatterns: excludes,
-		IncludePaths:    includes, // TODO: rename IncludePatterns
+		IncludePatterns: includes,
+		Map:             _map,
 	}, progress)
 }
 
-func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error {
+func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
 	st := time.Now()
 	defer func() {
 		logrus.Debugf("diffcopy took: %v", time.Since(st))
 	}()
 	var cf fsutil.ChangeFunc
+	var ch fsutil.ContentHasher
 	if cu != nil {
 		cu.MarkSupported(true)
 		cf = cu.HandleChange
+		ch = cu.ContentHasher()
 	}
+	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+		NotifyHashed:  cf,
+		ContentHasher: ch,
+		ProgressCb:    progress,
+	})
+}
 
-	return fsutil.Receive(ds.Context(), ds, dest, cf)
+func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
+	if err := os.MkdirAll(dest, 0700); err != nil {
+		return err
+	}
+	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+		Merge: true,
+		Filter: func() func(*fsutil.Stat) bool {
+			uid := os.Getuid()
+			gid := os.Getgid()
+			return func(st *fsutil.Stat) bool {
+				st.Uid = uint32(uid)
+				st.Gid = uint32(gid)
+				return true
+			}
+		}(),
+	})
 }

+ 75 - 17
vendor/github.com/moby/buildkit/session/filesync/filesync.go

@@ -1,6 +1,7 @@
 package filesync
 
 import (
+	"fmt"
 	"os"
 	"os"
 	"strings"
 	"strings"
 
 
@@ -15,20 +16,29 @@ import (
 const (
 	keyOverrideExcludes = "override-excludes"
 	keyIncludePatterns  = "include-patterns"
+	keyDirName          = "dir-name"
 )
 
 type fsSyncProvider struct {
-	root     string
-	excludes []string
-	p        progressCb
-	doneCh   chan error
+	dirs   map[string]SyncedDir
+	p      progressCb
+	doneCh chan error
+}
+
+type SyncedDir struct {
+	Name     string
+	Dir      string
+	Excludes []string
+	Map      func(*fsutil.Stat) bool
 }
 
 // NewFSSyncProvider creates a new provider for sending files from client
-func NewFSSyncProvider(root string, excludes []string) session.Attachable {
+func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
 	p := &fsSyncProvider{
-		root:     root,
-		excludes: excludes,
+		dirs: map[string]SyncedDir{},
+	}
+	for _, d := range dirs {
+		p.dirs[d.Name] = d
 	}
 	return p
 }
@@ -58,9 +68,19 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error
 
 	opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
 
+	name, ok := opts[keyDirName]
+	if !ok || len(name) != 1 {
+		return errors.New("no dir name in request")
+	}
+
+	dir, ok := sp.dirs[name[0]]
+	if !ok {
+		return errors.Errorf("no access allowed to dir %q", name[0])
+	}
+
 	var excludes []string
 	if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" {
-		excludes = sp.excludes
+		excludes = dir.Excludes
 	}
 	includes := opts[keyIncludePatterns]
 
@@ -75,7 +95,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error
 		doneCh = sp.doneCh
 		sp.doneCh = nil
 	}
-	err := pr.sendFn(stream, sp.root, includes, excludes, progress)
+	err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map)
 	if doneCh != nil {
 		if err != nil {
 			doneCh <- err
@@ -94,8 +114,8 @@ type progressCb func(int, bool)
 
 type protocol struct {
 	name   string
-	sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error
-	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error
+	sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error
+	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
 }
 
 func isProtoSupported(p string) bool {
@@ -112,25 +132,23 @@ var supportedProtocols = []protocol{
 		sendFn: sendDiffCopy,
 		recvFn: recvDiffCopy,
 	},
-	{
-		name:   "tarstream",
-		sendFn: sendTarStream,
-		recvFn: recvTarStream,
-	},
 }
 
 // FSSendRequestOpt defines options for FSSend request
 type FSSendRequestOpt struct {
+	Name             string
 	IncludePatterns  []string
 	OverrideExcludes bool
 	DestDir          string
 	CacheUpdater     CacheUpdater
+	ProgressCb       func(int, bool)
 }
 
 // CacheUpdater is an object capable of sending notifications for the cache hash changes
 type CacheUpdater interface {
 	MarkSupported(bool)
 	HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error
+	ContentHasher() fsutil.ContentHasher
 }
 
 // FSSync initializes a transfer of files
@@ -155,6 +173,8 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
 		opts[keyIncludePatterns] = opt.IncludePatterns
 	}
 
+	opts[keyDirName] = []string{opt.Name}
+
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -177,7 +197,45 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
 			return err
 		}
 		stream = cc
+	default:
+		panic(fmt.Sprintf("invalid protocol: %q", pr.name))
+	}
+
+	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb)
+}
+
+// NewFSSyncTarget allows writing into a directory
+func NewFSSyncTarget(outdir string) session.Attachable {
+	p := &fsSyncTarget{
+		outdir: outdir,
+	}
+	return p
+}
+
+type fsSyncTarget struct {
+	outdir string
+}
+
+func (sp *fsSyncTarget) Register(server *grpc.Server) {
+	RegisterFileSendServer(server, sp)
+}
+
+func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
+	return syncTargetDiffCopy(stream, sp.outdir)
+}
+
+func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error {
+	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
+	if !c.Supports(method) {
+		return errors.Errorf("method %s not supported by the client", method)
+	}
+
+	client := NewFileSendClient(c.Conn())
+
+	cc, err := client.DiffCopy(ctx)
+	if err != nil {
+		return err
 	}
 
-	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater)
+	return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
 }
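Taken together with `session.Session`, the new `SyncedDir`/`NewFSSyncTarget` API replaces the old single-root provider. A rough client-side sketch, assuming the session is connected to a daemon elsewhere (the directory paths are placeholders, and the dir names match the `context`/`dockerfile` locals used in the README above):

```go
package main

import (
	"log"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

func main() {
	s, err := session.NewSession("example", "") // name and shared key are illustrative
	if err != nil {
		log.Fatal(err)
	}

	// Expose named directories to the daemon. The names correspond to the
	// --local context=. --local dockerfile=. flags shown in the README.
	s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
		{Name: "context", Dir: "."},
		{Name: "dockerfile", Dir: ".", Excludes: []string{".git"}},
	}))

	// Accept files the daemon sends back, e.g. for a local exporter output.
	s.Allow(filesync.NewFSSyncTarget("./output"))

	// s.Run(ctx, dialer) would then establish the connection; the dialer
	// depends on how this client reaches buildd and is omitted here.
	log.Println("session id:", s.ID())
}
```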

+ 103 - 7
vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go

@@ -277,6 +277,102 @@ var _FileSync_serviceDesc = grpc.ServiceDesc{
 	Metadata: "filesync.proto",
 }
 
+// Client API for FileSend service
+
+type FileSendClient interface {
+	DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error)
+}
+
+type fileSendClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewFileSendClient(cc *grpc.ClientConn) FileSendClient {
+	return &fileSendClient{cc}
+}
+
+func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &fileSendDiffCopyClient{stream}
+	return x, nil
+}
+
+type FileSend_DiffCopyClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type fileSendDiffCopyClient struct {
+	grpc.ClientStream
+}
+
+func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for FileSend service
+
+type FileSendServer interface {
+	DiffCopy(FileSend_DiffCopyServer) error
+}
+
+func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) {
+	s.RegisterService(&_FileSend_serviceDesc, srv)
+}
+
+func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream})
+}
+
+type FileSend_DiffCopyServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type fileSendDiffCopyServer struct {
+	grpc.ServerStream
+}
+
+func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _FileSend_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "moby.filesync.v1.FileSend",
+	HandlerType: (*FileSendServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "DiffCopy",
+			Handler:       _FileSend_DiffCopy_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "filesync.proto",
+}
+
 func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -558,7 +654,7 @@ var (
 func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
 
 var fileDescriptorFilesync = []byte{
-	// 198 bytes of a gzipped FileDescriptorProto
+	// 208 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
 	0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
 	0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
@@ -566,10 +662,10 @@ var fileDescriptorFilesync = []byte{
 	0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83,
 	0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
 	0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
-	0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32,
-	0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9,
-	0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e,
-	0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51,
-	0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c,
-	0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00,
+	0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a,
+	0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6,
+	0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78,
+	0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7,
+	0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0,
+	0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00,
 }

+ 5 - 0
vendor/github.com/moby/buildkit/session/filesync/filesync.proto

@@ -9,6 +9,11 @@ service FileSync{
   rpc TarStream(stream BytesMessage) returns (stream BytesMessage);
 }
 
+service FileSend{
+  rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage);
+}
+
+
 // BytesMessage contains a chunk of byte data
 message BytesMessage{
 	bytes data = 1;

+ 0 - 83
vendor/github.com/moby/buildkit/session/filesync/tarstream.go

@@ -1,83 +0,0 @@
-package filesync
-
-import (
-	"io"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/grpc"
-)
-
-func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
-	a, err := archive.TarWithOptions(dir, &archive.TarOptions{
-		ExcludePatterns: excludes,
-	})
-	if err != nil {
-		return err
-	}
-
-	size := 0
-	buf := make([]byte, 1<<15)
-	t := new(BytesMessage)
-	for {
-		n, err := a.Read(buf)
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-		t.Data = buf[:n]
-
-		if err := stream.SendMsg(t); err != nil {
-			return err
-		}
-		size += n
-		if progress != nil {
-			progress(size, false)
-		}
-	}
-	if progress != nil {
-		progress(size, true)
-	}
-	return nil
-}
-
-func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error {
-
-	pr, pw := io.Pipe()
-
-	go func() {
-		var (
-			err error
-			t   = new(BytesMessage)
-		)
-		for {
-			if err = ds.RecvMsg(t); err != nil {
-				if err == io.EOF {
-					err = nil
-				}
-				break
-			}
-			_, err = pw.Write(t.Data)
-			if err != nil {
-				break
-			}
-		}
-		if err = pw.CloseWithError(err); err != nil {
-			logrus.Errorf("failed to close tar transfer pipe")
-		}
-	}()
-
-	decompressedStream, err := archive.DecompressStream(pr)
-	if err != nil {
-		return errors.Wrap(err, "failed to decompress stream")
-	}
-
-	if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil {
-		return errors.Wrap(err, "failed to untar context")
-	}
-	return nil
-}

+ 21 - 11
vendor/github.com/moby/buildkit/session/manager.go

@@ -49,14 +49,14 @@ func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter,
 		return errors.New("handler does not support hijack")
 	}
 
-	uuid := r.Header.Get(headerSessionUUID)
+	id := r.Header.Get(headerSessionID)
 
 	proto := r.Header.Get("Upgrade")
 
 	sm.mu.Lock()
-	if _, ok := sm.sessions[uuid]; ok {
+	if _, ok := sm.sessions[id]; ok {
 		sm.mu.Unlock()
-		return errors.Errorf("session %s already exists", uuid)
+		return errors.Errorf("session %s already exists", id)
 	}
 
 	if proto == "" {
@@ -102,8 +102,10 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
+	opts = canonicalHeaders(opts)
+
 	h := http.Header(opts)
-	uuid := h.Get(headerSessionUUID)
+	id := h.Get(headerSessionID)
 	name := h.Get(headerSessionName)
 	sharedKey := h.Get(headerSessionSharedKey)
 
@@ -115,7 +117,7 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
 
 	c := &client{
 		Session: Session{
-			uuid:      uuid,
+			id:        id,
 			name:      name,
 			sharedKey: sharedKey,
 			ctx:       ctx,
@@ -129,13 +131,13 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
 	for _, m := range opts[headerSessionMethod] {
 		c.supported[strings.ToLower(m)] = struct{}{}
 	}
-	sm.sessions[uuid] = c
+	sm.sessions[id] = c
 	sm.updateCondition.Broadcast()
 	sm.mu.Unlock()
 
 	defer func() {
 		sm.mu.Lock()
-		delete(sm.sessions, uuid)
+		delete(sm.sessions, id)
 		sm.mu.Unlock()
 	}()
 
@@ -146,8 +148,8 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
 	return nil
 }
 
-// Get returns a session by UUID
-func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
+// Get returns a session by ID
+func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -165,11 +167,11 @@ func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
 		select {
 		case <-ctx.Done():
 			sm.mu.Unlock()
-			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
+			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id)
 		default:
 		}
 		var ok bool
-		c, ok = sm.sessions[uuid]
+		c, ok = sm.sessions[id]
 		if !ok || c.closed() {
 			sm.updateCondition.Wait()
 			continue
@@ -200,3 +202,11 @@ func (c *client) Supports(url string) bool {
 func (c *client) Conn() *grpc.ClientConn {
 	return c.cc
 }
+
+func canonicalHeaders(in map[string][]string) map[string][]string {
+	out := map[string][]string{}
+	for k := range in {
+		out[http.CanonicalHeaderKey(k)] = in[k]
+	}
+	return out
+}
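On the daemon side, `canonicalHeaders` only normalizes metadata keys; the interesting flow is resolving a `Caller` by session ID and asking it for one of the named directories. A hedged sketch of that flow (the helper name and destination path are hypothetical, and it assumes a `*session.Manager` that is already handling connections):

```go
package example

import (
	"context"
	"errors"
	"log"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

// pullBuildContext is a hypothetical daemon-side helper: it resolves the
// caller for the session ID carried in ctx and asks it to send its "context"
// directory into destDir.
func pullBuildContext(ctx context.Context, sm *session.Manager, destDir string) error {
	id := session.FromContext(ctx)
	if id == "" {
		return errors.New("no session id in context")
	}

	caller, err := sm.Get(ctx, id)
	if err != nil {
		return err
	}

	return filesync.FSSync(ctx, caller, filesync.FSSendRequestOpt{
		Name:    "context", // must match a SyncedDir name registered by the client
		DestDir: destDir,
		ProgressCb: func(n int, last bool) {
			if last {
				log.Printf("transferred %d bytes", n)
			}
		},
	})
}
```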

+ 9 - 8
vendor/github.com/moby/buildkit/session/session.go

@@ -12,7 +12,7 @@ import (
 )
 
 const (
-	headerSessionUUID      = "X-Docker-Expose-Session-Uuid"
+	headerSessionID        = "X-Docker-Expose-Session-Uuid"
 	headerSessionName      = "X-Docker-Expose-Session-Name"
 	headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
 	headerSessionMethod    = "X-Docker-Expose-Session-Grpc-Method"
@@ -28,7 +28,7 @@ type Attachable interface {
 
 // Session is a long running connection between client and a daemon
 type Session struct {
-	uuid       string
+	id         string
 	name       string
 	sharedKey  string
 	ctx        context.Context
@@ -39,9 +39,9 @@ type Session struct {
 
 // NewSession returns a new long running session
 func NewSession(name, sharedKey string) (*Session, error) {
-	uuid := stringid.GenerateRandomID()
+	id := stringid.GenerateRandomID()
 	s := &Session{
-		uuid:       uuid,
+		id:         id,
 		name:       name,
 		sharedKey:  sharedKey,
 		grpcServer: grpc.NewServer(),
@@ -57,9 +57,9 @@ func (s *Session) Allow(a Attachable) {
 	a.Register(s.grpcServer)
 }
 
-// UUID returns unique identifier for the session
-func (s *Session) UUID() string {
-	return s.uuid
+// ID returns unique identifier for the session
+func (s *Session) ID() string {
+	return s.id
 }
 
 // Run activates the session
@@ -72,7 +72,7 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error {
 	defer close(s.done)
 
 	meta := make(map[string][]string)
-	meta[headerSessionUUID] = []string{s.uuid}
+	meta[headerSessionID] = []string{s.id}
 	meta[headerSessionName] = []string{s.name}
 	meta[headerSessionSharedKey] = []string{s.sharedKey}
 
@@ -92,6 +92,7 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error {
 // Close closes the session
 func (s *Session) Close() error {
 	if s.cancelCtx != nil && s.done != nil {
+		s.grpcServer.Stop()
 		s.cancelCtx()
 		<-s.done
 	}

+ 16 - 10
vendor/github.com/moby/buildkit/vendor.conf

@@ -6,26 +6,26 @@ github.com/davecgh/go-spew v1.1.0
 github.com/pmezard/go-difflib v1.0.0
 golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
 
-github.com/containerd/containerd 3707703a694187c7d08e2f333da6ddd58bcb729d
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
-github.com/Sirupsen/logrus v0.11.0
+github.com/containerd/containerd d1e11f17ec7b325f89608dd46c128300b8727d50
+golang.org/x/sync f52d1811a62927559de87708c8913c1650ce4f26
+github.com/sirupsen/logrus v1.0.0
 google.golang.org/grpc v1.3.0
 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
 golang.org/x/net 1f9224279e98554b6a6432d4dd998a739f8b2b7c
 github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
 github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
-github.com/opencontainers/image-spec v1.0.0-rc6
-github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab
+github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
 github.com/Microsoft/go-winio v0.4.1
 github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062
-github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee
+github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb
 github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d
 github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778
-github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-github.com/docker/go-events aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 
 github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
@@ -33,8 +33,14 @@ github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
 golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959
 
 github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8
-github.com/docker/docker 05c7c311390911daebcf5d9519dee813fc02a887
+github.com/docker/docker 6f723db8c6f0c7f0b252674a9673a25b5978db04 https://github.com/tonistiigi/docker.git
 github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
 
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
+github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203
 github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463
+github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
+github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f

+ 7 - 0
vendor/github.com/tonistiigi/fsutil/diff.go

@@ -1,6 +1,7 @@
 package fsutil
 
 import (
+	"hash"
 	"os"
 	"os"
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
@@ -14,6 +15,8 @@ func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error {
 
 type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error
 
+type ContentHasher func(*Stat) (hash.Hash, error)
+
 func GetWalkerFn(root string) walkerFn {
 	return func(ctx context.Context, pathC chan<- *currentPath) error {
 		return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error {
@@ -35,3 +38,7 @@ func GetWalkerFn(root string) walkerFn {
 		})
 	}
 }
+
+func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error {
+	return nil
+}
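The new `ContentHasher` type is just `func(*Stat) (hash.Hash, error)`. A deliberately naive sketch of an implementation, to show the shape that `DiskWriter` now expects; unlike BuildKit's real hasher it ignores file metadata entirely:

```go
package example

import (
	"crypto/sha256"
	"hash"

	"github.com/tonistiigi/fsutil"
)

// Compile-time check that naiveHasher satisfies fsutil.ContentHasher.
var _ fsutil.ContentHasher = naiveHasher

// naiveHasher hashes file contents only; the *fsutil.Stat metadata is
// intentionally ignored in this sketch.
func naiveHasher(st *fsutil.Stat) (hash.Hash, error) {
	_ = st
	return sha256.New(), nil
}
```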

+ 29 - 60
vendor/github.com/tonistiigi/fsutil/diskwriter.go

@@ -1,11 +1,6 @@
-// +build linux windows
-
 package fsutil
 
 import (
-	"archive/tar"
-	"crypto/sha256"
-	"encoding/hex"
 	"hash"
 	"hash"
 	"io"
 	"io"
 	"os"
 	"os"
@@ -14,8 +9,7 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/tarsum"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/errgroup"
@@ -24,11 +18,15 @@ import (
 type WriteToFunc func(context.Context, string, io.WriteCloser) error
 
 type DiskWriterOpt struct {
-	AsyncDataCb WriteToFunc
-	SyncDataCb  WriteToFunc
-	NotifyCb    func(ChangeKind, string, os.FileInfo, error) error
+	AsyncDataCb   WriteToFunc
+	SyncDataCb    WriteToFunc
+	NotifyCb      func(ChangeKind, string, os.FileInfo, error) error
+	ContentHasher ContentHasher
+	Filter        FilterFunc
 }
 
+type FilterFunc func(*Stat) bool
+
 type DiskWriter struct {
 	opt  DiskWriterOpt
 	dest string
@@ -37,6 +35,7 @@ type DiskWriter struct {
 	ctx    context.Context
 	cancel func()
 	eg     *errgroup.Group
+	filter FilterFunc
 }
 
 func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) {
@@ -102,6 +101,12 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
 		return errors.Errorf("%s invalid change without stat information", p)
 	}
 
+	if dw.filter != nil {
+		if ok := dw.filter(stat); !ok {
+			return nil
+		}
+	}
+
 	rename := true
 	oldFi, err := os.Lstat(destPath)
 	if err != nil {
@@ -202,7 +207,7 @@ func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w
 	var hw *hashedWriter
 	if dw.opt.NotifyCb != nil {
 		var err error
-		if hw, err = newHashWriter(p, fi, w); err != nil {
+		if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil {
 			return err
 		}
 		w = hw
@@ -229,13 +234,18 @@ func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w
 type hashedWriter struct {
 	os.FileInfo
 	io.Writer
-	h   hash.Hash
-	w   io.WriteCloser
-	sum string
+	h    hash.Hash
+	w    io.WriteCloser
+	dgst digest.Digest
 }
 
-func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
-	h, err := NewTarsumHash(p, fi)
+func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
+	stat, ok := fi.Sys().(*Stat)
+	if !ok {
+		return nil, errors.Errorf("invalid change without stat information")
+	}
+
+	h, err := ch(stat)
 	if err != nil {
 		return nil, err
 	}
@@ -249,15 +259,15 @@ func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, e
 }
 
 func (hw *hashedWriter) Close() error {
-	hw.sum = string(hex.EncodeToString(hw.h.Sum(nil)))
+	hw.dgst = digest.NewDigest(digest.SHA256, hw.h)
 	if hw.w != nil {
 		return hw.w.Close()
 	}
 	return nil
 }
 
-func (hw *hashedWriter) Hash() string {
-	return hw.sum
+func (hw *hashedWriter) Digest() digest.Digest {
+	return hw.dgst
 }
 
 type lazyFileWriter struct {
@@ -310,44 +320,3 @@ func nextSuffix() string {
 	randmu.Unlock()
 	return strconv.Itoa(int(1e9 + r%1e9))[1:]
 }
-
-func NewTarsumHash(p string, fi os.FileInfo) (hash.Hash, error) {
-	stat, ok := fi.Sys().(*Stat)
-	link := ""
-	if ok {
-		link = stat.Linkname
-	}
-	if fi.IsDir() {
-		p += string(os.PathSeparator)
-	}
-	h, err := archive.FileInfoHeader(p, fi, link)
-	if err != nil {
-		return nil, err
-	}
-	h.Name = p
-	if ok {
-		h.Uid = int(stat.Uid)
-		h.Gid = int(stat.Gid)
-		h.Linkname = stat.Linkname
-		if stat.Xattrs != nil {
-			h.Xattrs = make(map[string]string)
-			for k, v := range stat.Xattrs {
-				h.Xattrs[k] = string(v)
-			}
-		}
-	}
-	tsh := &tarsumHash{h: h, Hash: sha256.New()}
-	tsh.Reset()
-	return tsh, nil
-}
-
-// Reset resets the Hash to its initial state.
-func (tsh *tarsumHash) Reset() {
-	tsh.Hash.Reset()
-	tarsum.WriteV1Header(tsh.h, tsh.Hash)
-}
-
-type tarsumHash struct {
-	hash.Hash
-	h *tar.Header
-}

+ 7 - 0
vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go

@@ -0,0 +1,7 @@
+// +build darwin
+
+package fsutil
+
+func chtimes(path string, un int64) error {
+	return nil
+}

+ 0 - 44
vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go

@@ -3,36 +3,10 @@
 package fsutil
 
 import (
-	"os"
-	"syscall"
-
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
-	"github.com/stevvooe/continuity/sysx"
 	"golang.org/x/sys/unix"
 	"golang.org/x/sys/unix"
 )
 )
 
 
-func rewriteMetadata(p string, stat *Stat) error {
-	for key, value := range stat.Xattrs {
-		sysx.Setxattr(p, key, value, 0)
-	}
-
-	if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil {
-		return errors.Wrapf(err, "failed to lchown %s", p)
-	}
-
-	if os.FileMode(stat.Mode)&os.ModeSymlink == 0 {
-		if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil {
-			return errors.Wrapf(err, "failed to chown %s", p)
-		}
-	}
-
-	if err := chtimes(p, stat.ModTime); err != nil {
-		return errors.Wrapf(err, "failed to chtimes %s", p)
-	}
-
-	return nil
-}
-
 func chtimes(path string, un int64) error {
 	var utimes [2]unix.Timespec
 	utimes[0] = unix.NsecToTimespec(un)
@@ -44,21 +18,3 @@ func chtimes(path string, un int64) error {
 
 	return nil
 }
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
-	mode := uint32(stat.Mode & 07777)
-	if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
-		mode |= syscall.S_IFCHR
-	} else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 {
-		mode |= syscall.S_IFIFO
-	} else {
-		mode |= syscall.S_IFBLK
-	}
-
-	if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
-		return err
-	}
-	return nil
-}

+ 51 - 0
vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go

@@ -0,0 +1,51 @@
+// +build !windows
+
+package fsutil
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/pkg/errors"
+	"github.com/stevvooe/continuity/sysx"
+)
+
+func rewriteMetadata(p string, stat *Stat) error {
+	for key, value := range stat.Xattrs {
+		sysx.Setxattr(p, key, value, 0)
+	}
+
+	if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil {
+		return errors.Wrapf(err, "failed to lchown %s", p)
+	}
+
+	if os.FileMode(stat.Mode)&os.ModeSymlink == 0 {
+		if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil {
+			return errors.Wrapf(err, "failed to chown %s", p)
+		}
+	}
+
+	if err := chtimes(p, stat.ModTime); err != nil {
+		return errors.Wrapf(err, "failed to chtimes %s", p)
+	}
+
+	return nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
+	mode := uint32(stat.Mode & 07777)
+	if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
+		mode |= syscall.S_IFCHR
+	} else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 {
+		mode |= syscall.S_IFIFO
+	} else {
+		mode |= syscall.S_IFBLK
+	}
+
+	if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
+		return err
+	}
+	return nil
+}

+ 48 - 17
vendor/github.com/tonistiigi/fsutil/receive.go

@@ -1,5 +1,3 @@
-// +build linux windows
-
 package fsutil
 
 import (
@@ -12,29 +10,45 @@ import (
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/errgroup"
 )
 )
 
 
-func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
+type ReceiveOpt struct {
+	NotifyHashed  ChangeFunc
+	ContentHasher ContentHasher
+	ProgressCb    func(int, bool)
+	Merge         bool
+	Filter        FilterFunc
+}
+
+func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
 	r := &receiver{
-		conn:         &syncStream{Stream: conn},
-		dest:         dest,
-		files:        make(map[string]uint32),
-		pipes:        make(map[uint32]io.WriteCloser),
-		notifyHashed: notifyHashed,
+		conn:          &syncStream{Stream: conn},
+		dest:          dest,
+		files:         make(map[string]uint32),
+		pipes:         make(map[uint32]io.WriteCloser),
+		notifyHashed:  opt.NotifyHashed,
+		contentHasher: opt.ContentHasher,
+		progressCb:    opt.ProgressCb,
+		merge:         opt.Merge,
+		filter:        opt.Filter,
 	}
 	return r.run(ctx)
 }
 
 type receiver struct {
-	dest    string
-	conn    Stream
-	files   map[string]uint32
-	pipes   map[uint32]io.WriteCloser
-	mu      sync.RWMutex
-	muPipes sync.RWMutex
+	dest       string
+	conn       Stream
+	files      map[string]uint32
+	pipes      map[uint32]io.WriteCloser
+	mu         sync.RWMutex
+	muPipes    sync.RWMutex
+	progressCb func(int, bool)
+	merge      bool
+	filter     FilterFunc
 
 	notifyHashed   ChangeFunc
+	contentHasher  ContentHasher
 	orderValidator Validator
 	hlValidator    Hardlinks
 }
@@ -81,8 +95,10 @@ func (r *receiver) run(ctx context.Context) error {
 	g, ctx := errgroup.WithContext(ctx)
 
 	dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{
-		AsyncDataCb: r.asyncDataFunc,
-		NotifyCb:    r.notifyHashed,
+		AsyncDataCb:   r.asyncDataFunc,
+		NotifyCb:      r.notifyHashed,
+		ContentHasher: r.contentHasher,
+		Filter:        r.filter,
 	})
 	if err != nil {
 		return err
@@ -91,7 +107,11 @@ func (r *receiver) run(ctx context.Context) error {
 	w := newDynamicWalker()
 
 	g.Go(func() error {
-		err := doubleWalkDiff(ctx, dw.HandleChange, GetWalkerFn(r.dest), w.fill)
+		destWalker := emptyWalker
+		if !r.merge {
+			destWalker = GetWalkerFn(r.dest)
+		}
+		err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill)
 		if err != nil {
 			return err
 		}
@@ -105,12 +125,23 @@ func (r *receiver) run(ctx context.Context) error {
 	g.Go(func() error {
 		var i uint32 = 0
 
+		size := 0
+		if r.progressCb != nil {
+			defer func() {
+				r.progressCb(size, true)
+			}()
+		}
 		var p Packet
 		for {
 			p = Packet{Data: p.Data[:0]}
 			if err := r.conn.RecvMsg(&p); err != nil {
 				return err
 			}
+			if r.progressCb != nil {
+				size += p.Size()
+				r.progressCb(size, false)
+			}
+
 			switch p.Type {
 			case PACKET_STAT:
 				if p.Stat == nil {
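`ReceiveOpt` bundles what used to be a single `notifyHashed` argument. A sketch of a merge-style receive such as the one `syncTargetDiffCopy` performs; obtaining the `fsutil.Stream` (in BuildKit a gRPC DiffCopy stream) is out of scope here, and the function name is made up:

```go
package example

import (
	"context"
	"os"

	"github.com/tonistiigi/fsutil"
)

// receiveInto writes an incoming file stream into dest, squashing ownership
// to the current user the same way syncTargetDiffCopy does above.
func receiveInto(ctx context.Context, conn fsutil.Stream, dest string) error {
	if err := os.MkdirAll(dest, 0700); err != nil {
		return err
	}
	return fsutil.Receive(ctx, conn, dest, fsutil.ReceiveOpt{
		Merge: true, // do not diff against the current contents of dest
		Filter: func(st *fsutil.Stat) bool {
			st.Uid = uint32(os.Getuid())
			st.Gid = uint32(os.Getgid())
			return true
		},
		ProgressCb: func(n int, last bool) {
			// n is the cumulative byte count; last is true once the stream ends.
		},
	})
}
```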

+ 0 - 14
vendor/github.com/tonistiigi/fsutil/receive_unsupported.go

@@ -1,14 +0,0 @@
-// +build !linux,!windows
-
-package fsutil
-
-import (
-	"runtime"
-
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-)
-
-func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
-	return errors.Errorf("receive is unsupported in %s", runtime.GOOS)
-}

+ 11 - 7
vendor/github.com/tonistiigi/fsutil/validator.go

@@ -2,7 +2,8 @@ package fsutil
 
 import (
 	"os"
-	"path/filepath"
+	"path"
+	"runtime"
 	"sort"
 	"sort"
 	"strings"
 	"strings"
 
 
@@ -26,14 +27,17 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err
 	if v.parentDirs == nil {
 		v.parentDirs = make([]parent, 1, 10)
 	}
-	if p != filepath.Clean(p) {
+	if runtime.GOOS == "windows" {
+		p = strings.Replace(p, "\\", "", -1)
+	}
+	if p != path.Clean(p) {
 		return errors.Errorf("invalid unclean path %s", p)
 	}
-	if filepath.IsAbs(p) {
+	if path.IsAbs(p) {
 		return errors.Errorf("abolute path %s not allowed", p)
 	}
-	dir := filepath.Dir(p)
-	base := filepath.Base(p)
+	dir := path.Dir(p)
+	base := path.Base(p)
 	if dir == "." {
 		dir = ""
 	}
@@ -51,12 +55,12 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err
 	}
 
 	if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base {
-		return errors.Errorf("changes out of order: %q %q", p, filepath.Join(v.parentDirs[i].dir, v.parentDirs[i].last))
+		return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last))
 	}
 	v.parentDirs[i].last = base
 	if kind != ChangeKindDelete && fi.IsDir() {
 		v.parentDirs = append(v.parentDirs, parent{
-			dir:  filepath.Join(dir, base),
+			dir:  path.Join(dir, base),
 			last: "",
 		})
 	}

+ 10 - 4
vendor/github.com/tonistiigi/fsutil/walker.go

@@ -13,8 +13,9 @@ import (
 )
 
 type WalkOpt struct {
-	IncludePaths    []string // todo: remove?
+	IncludePatterns []string
 	ExcludePatterns []string
+	Map             func(*Stat) bool
 }
 
 func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error {
@@ -57,9 +58,9 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
 		}
 
 		if opt != nil {
-			if opt.IncludePaths != nil {
+			if opt.IncludePatterns != nil {
 				matched := false
-				for _, p := range opt.IncludePaths {
+				for _, p := range opt.IncludePatterns {
 					if m, _ := filepath.Match(p, path); m {
 						matched = true
 						break
@@ -138,7 +139,12 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
 		case <-ctx.Done():
 			return ctx.Err()
 		default:
-			if err := fn(path, &StatInfo{stat}, nil); err != nil {
+			if opt != nil && opt.Map != nil {
+				if allowed := opt.Map(stat); !allowed {
+					return nil
+				}
+			}
+			if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil {
 				return err
 			}
 		}
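The rename from `IncludePaths` to `IncludePatterns` and the new `Map` hook are easiest to see from the caller's side. A small sketch under arbitrary paths and patterns (note that `Map` returning false drops the entry, and `*.go` only matches top-level files here since `filepath.Match` does not cross path separators):

```go
package example

import (
	"context"
	"fmt"
	"os"

	"github.com/tonistiigi/fsutil"
)

// listTopLevelGoFiles walks root and prints top-level *.go files, normalizing
// ownership in the reported Stat before each callback fires.
func listTopLevelGoFiles(root string) error {
	return fsutil.Walk(context.Background(), root, &fsutil.WalkOpt{
		IncludePatterns: []string{"*.go"},
		ExcludePatterns: []string{"vendor"},
		Map: func(st *fsutil.Stat) bool {
			st.Uid, st.Gid = 0, 0 // returning false here would skip the entry
			return true
		},
	}, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, fi.Mode())
		return nil
	})
}
```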