
Merge pull request #312 from tonistiigi/1903-buildkit-bump

[19.03] vendor: update buildkit to f5a55a95
Andrew Hsu, 6 years ago
Parent commit: 8ecf5409e9
64 changed files with 951 additions and 341 deletions
  1. builder/builder-next/exporter/writer.go (+31 -0)
  2. builder/builder-next/worker/worker.go (+43 -0)
  3. vendor.conf (+1 -1)
  4. vendor/github.com/moby/buildkit/README.md (+129 -89)
  5. vendor/github.com/moby/buildkit/cache/manager.go (+18 -7)
  6. vendor/github.com/moby/buildkit/cache/metadata/metadata.go (+39 -27)
  7. vendor/github.com/moby/buildkit/cache/refs.go (+27 -2)
  8. vendor/github.com/moby/buildkit/cache/remotecache/import.go (+8 -8)
  9. vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go (+1 -1)
  10. vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go (+1 -1)
  11. vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go (+3 -3)
  12. vendor/github.com/moby/buildkit/cache/util/fsutil.go (+7 -7)
  13. vendor/github.com/moby/buildkit/client/llb/exec.go (+10 -4)
  14. vendor/github.com/moby/buildkit/client/llb/meta.go (+15 -7)
  15. vendor/github.com/moby/buildkit/client/llb/state.go (+4 -4)
  16. vendor/github.com/moby/buildkit/client/solve.go (+2 -2)
  17. vendor/github.com/moby/buildkit/control/control.go (+1 -1)
  18. vendor/github.com/moby/buildkit/executor/oci/hosts.go (+15 -6)
  19. vendor/github.com/moby/buildkit/executor/oci/resolvconf.go (+47 -6)
  20. vendor/github.com/moby/buildkit/executor/oci/spec.go (+13 -0)
  21. vendor/github.com/moby/buildkit/executor/oci/spec_unix.go (+5 -17)
  22. vendor/github.com/moby/buildkit/executor/oci/user.go (+2 -10)
  23. vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go (+11 -6)
  24. vendor/github.com/moby/buildkit/exporter/tar/export.go (+1 -1)
  25. vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go (+4 -2)
  26. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go (+19 -28)
  27. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go (+11 -0)
  28. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go (+6 -1)
  29. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go (+27 -0)
  30. vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go (+12 -0)
  31. vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go (+83 -0)
  32. vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go (+1 -4)
  33. vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go (+1 -1)
  34. vendor/github.com/moby/buildkit/session/auth/auth.go (+3 -2)
  35. vendor/github.com/moby/buildkit/session/content/caller.go (+16 -9)
  36. vendor/github.com/moby/buildkit/session/filesync/diffcopy.go (+18 -12)
  37. vendor/github.com/moby/buildkit/session/filesync/filesync.go (+38 -15)
  38. vendor/github.com/moby/buildkit/session/secrets/secrets.go (+2 -2)
  39. vendor/github.com/moby/buildkit/session/sshforward/copy.go (+5 -4)
  40. vendor/github.com/moby/buildkit/session/sshforward/ssh.go (+7 -6)
  41. vendor/github.com/moby/buildkit/session/upload/upload.go (+4 -3)
  42. vendor/github.com/moby/buildkit/solver/edge.go (+6 -4)
  43. vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go (+19 -4)
  44. vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go (+1 -6)
  45. vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go (+4 -0)
  46. vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go (+17 -5)
  47. vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go (+3 -0)
  48. vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go (+4 -0)
  49. vendor/github.com/moby/buildkit/solver/llbsolver/solver.go (+17 -0)
  50. vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go (+89 -0)
  51. vendor/github.com/moby/buildkit/solver/result.go (+1 -1)
  52. vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go (+8 -0)
  53. vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go (+7 -0)
  54. vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go (+7 -0)
  55. vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go (+24 -0)
  56. vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go (+8 -0)
  57. vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go (+7 -0)
  58. vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go (+7 -0)
  59. vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go (+8 -0)
  60. vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go (+7 -0)
  61. vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go (+7 -0)
  62. vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go (+8 -4)
  63. vendor/github.com/moby/buildkit/util/network/network.go (+0 -18)
  64. vendor/github.com/moby/buildkit/worker/worker.go (+1 -0)

+ 31 - 0
builder/builder-next/exporter/writer.go

@@ -137,6 +137,37 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 		history[i] = h
 	}
 
+	// Find the first new layer time. Otherwise, the history item for a first
+	// metadata command would be the creation time of a base image layer.
+	// If there is no such then the last layer with timestamp.
+	var created *time.Time
+	var noCreatedTime bool
+	for _, h := range history {
+		if h.Created != nil {
+			created = h.Created
+			if noCreatedTime {
+				break
+			}
+		} else {
+			noCreatedTime = true
+		}
+	}
+
+	// Fill in created times for all history items to be either the first new
+	// layer time or the previous layer.
+	noCreatedTime = false
+	for i, h := range history {
+		if h.Created != nil {
+			if noCreatedTime {
+				created = h.Created
+			}
+		} else {
+			noCreatedTime = true
+			h.Created = created
+		}
+		history[i] = h
+	}
+
 	return diffs, history
 }
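
Note (not part of the diff): a dependency-free sketch of the two-pass backfill added above, using a stand-in struct instead of `ocispec.History`, to show its effect on a history where only the last layer carries a timestamp.

```go
package main

import (
	"fmt"
	"time"
)

// stand-in for ocispec.History; only the field the algorithm touches
type historyItem struct{ Created *time.Time }

func main() {
	t := time.Date(2019, 6, 1, 0, 0, 0, 0, time.UTC)
	// two metadata-only entries followed by a real layer with a timestamp
	history := []historyItem{{nil}, {nil}, {&t}}

	// pass 1: pick the first timestamp that follows a gap
	// (falling back to the last timestamp seen)
	var created *time.Time
	var gap bool
	for _, h := range history {
		if h.Created != nil {
			created = h.Created
			if gap {
				break
			}
		} else {
			gap = true
		}
	}

	// pass 2: backfill gaps with that timestamp, switching to later
	// timestamps as they appear
	gap = false
	for i, h := range history {
		if h.Created != nil {
			if gap {
				created = h.Created
			}
		} else {
			gap = true
			h.Created = created
		}
		history[i] = h
	}

	for i, h := range history {
		fmt.Println(i, h.Created.UTC()) // every entry now carries the 2019-06-01 timestamp
	}
}
```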
 

+ 43 - 0
builder/builder-next/worker/worker.go

@@ -7,6 +7,7 @@ import (
 	"io/ioutil"
 	nethttp "net/http"
 	"runtime"
+	"strings"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -43,6 +44,7 @@ import (
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	bolt "go.etcd.io/bbolt"
 )
 
 const labelCreatedAt = "buildkit/createdat"
@@ -257,6 +259,47 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf
 	}, nil
 }
 
+// PruneCacheMounts removes the current cache snapshots for specified IDs
+func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
+	mu := ops.CacheMountsLocker()
+	mu.Lock()
+	defer mu.Unlock()
+
+	for _, id := range ids {
+		id = "cache-dir:" + id
+		sis, err := w.MetadataStore.Search(id)
+		if err != nil {
+			return err
+		}
+		for _, si := range sis {
+			for _, k := range si.Indexes() {
+				if k == id || strings.HasPrefix(k, id+":") {
+					if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil {
+						si = siCached
+					}
+					if err := cache.CachePolicyDefault(si); err != nil {
+						return err
+					}
+					si.Queue(func(b *bolt.Bucket) error {
+						return si.SetValue(b, k, nil)
+					})
+					if err := si.Commit(); err != nil {
+						return err
+					}
+					// if ref is unused try to clean it up right away by releasing it
+					if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil {
+						go mref.Release(context.TODO())
+					}
+					break
+				}
+			}
+		}
+	}
+
+	ops.ClearActiveCacheMounts()
+	return nil
+}
+
 // FromRemote converts a remote snapshot reference to a local one
 func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
 	rootfs, err := getLayers(ctx, remote.Descriptors)

+ 1 - 1
vendor.conf

@@ -27,7 +27,7 @@ github.com/imdario/mergo                            7c29201646fa3de8506f70121347
 golang.org/x/sync                                   e225da77a7e68af35c70ccbf71af2b83e6acac3c
 
 # buildkit
-github.com/moby/buildkit                            1f89ec125f84c097bdf3a063be622c4238dba5f8
+github.com/moby/buildkit                            f5a55a9516d1c6e2ade9bec22b83259caeed3a84 
 github.com/tonistiigi/fsutil                        3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing          8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go               1361b9cd60be79c4c3a7fa9841b3c132e40066a7

+ 129 - 89
vendor/github.com/moby/buildkit/README.md

@@ -1,27 +1,25 @@
 [![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)
 
-
 ## BuildKit
 
 [![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
 [![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit)
 [![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit)
 
-
 BuildKit is a toolkit for converting source code to build artifacts in an efficient, expressive and repeatable manner.
 
 Key features:
-- Automatic garbage collection
-- Extendable frontend formats
-- Concurrent dependency resolution
-- Efficient instruction caching
-- Build cache import/export
-- Nested build job invocations
-- Distributable workers
-- Multiple output formats
-- Pluggable architecture
-- Execution without root privileges
 
+-   Automatic garbage collection
+-   Extendable frontend formats
+-   Concurrent dependency resolution
+-   Efficient instruction caching
+-   Build cache import/export
+-   Nested build job invocations
+-   Distributable workers
+-   Multiple output formats
+-   Pluggable architecture
+-   Execution without root privileges
 
 Read the proposal from https://github.com/moby/moby/issues/32925
 
@@ -33,20 +31,21 @@ Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056c
 
 BuildKit is used by the following projects:
 
-- [Moby & Docker](https://github.com/moby/moby/pull/37151)
-- [img](https://github.com/genuinetools/img)
-- [OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
-- [container build interface](https://github.com/containerbuilding/cbi)
-- [Knative Build Templates](https://github.com/knative/build-templates)
-- [vab](https://github.com/stellarproject/vab)
-- [Rio](https://github.com/rancher/rio) (on roadmap)
+-   [Moby & Docker](https://github.com/moby/moby/pull/37151)
+-   [img](https://github.com/genuinetools/img)
+-   [OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
+-   [container build interface](https://github.com/containerbuilding/cbi)
+-   [Knative Build Templates](https://github.com/knative/build-templates)
+-   [the Sanic build tool](https://github.com/distributed-containers-inc/sanic)
+-   [vab](https://github.com/stellarproject/vab)
+-   [Rio](https://github.com/rancher/rio) (on roadmap)
 
 ### Quick start
 
 Dependencies:
-- [runc](https://github.com/opencontainers/runc)
-- [containerd](https://github.com/containerd/containerd) (if you want to use containerd worker)
 
+-   [runc](https://github.com/opencontainers/runc)
+-   [containerd](https://github.com/containerd/containerd) (if you want to use containerd worker)
 
 The following command installs `buildkitd` and `buildctl` to `/usr/local/bin`:
 
@@ -58,14 +57,13 @@ You can also use `make binaries-all` to prepare `buildkitd.containerd_only` and
 
 #### Starting the buildkitd daemon:
 
-```
+```bash
 buildkitd --debug --root /var/lib/buildkit
 ```
 
 The buildkitd daemon supports two worker backends: OCI (runc) and containerd.
 
-By default, the OCI (runc) worker is used.
-You can set `--oci-worker=false --containerd-worker=true` to use the containerd worker.
+By default, the OCI (runc) worker is used. You can set `--oci-worker=false --containerd-worker=true` to use the containerd worker.
 
 We are open to adding more backends.
 
@@ -73,44 +71,46 @@ We are open to adding more backends.
 
 BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C.
 
-- Marshaled as Protobuf messages
-- Concurrently executable
-- Efficiently cacheable
-- Vendor-neutral (i.e. non-Dockerfile languages can be easily implemented)
+-   Marshaled as Protobuf messages
+-   Concurrently executable
+-   Efficiently cacheable
+-   Vendor-neutral (i.e. non-Dockerfile languages can be easily implemented)
 
 See [`solver/pb/ops.proto`](./solver/pb/ops.proto) for the format definition.
 
 Currently, following high-level languages has been implemented for LLB:
 
-- Dockerfile (See [Exploring Dockerfiles](#exploring-dockerfiles))
-- [Buildpacks](https://github.com/tonistiigi/buildkit-pack)
-- (open a PR to add your own language)
+-   Dockerfile (See [Exploring Dockerfiles](#exploring-dockerfiles))
+-   [Buildpacks](https://github.com/tonistiigi/buildkit-pack)
+-   (open a PR to add your own language)
 
 For understanding the basics of LLB, `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit itself and its dependencies using the `client` package. Running one of these scripts generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
 
 You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout.
 
 ```bash
-go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq .
+go run examples/buildkit0/buildkit.go \
+    | buildctl debug dump-llb \
+    | jq .
 ```
 
-To start building use `buildctl build` command. The example script accepts `--with-containerd` flag to choose if containerd binaries and support should be included in the end result as well. 
+To start building use `buildctl build` command. The example script accepts `--with-containerd` flag to choose if containerd binaries and support should be included in the end result as well.
 
 ```bash
-go run examples/buildkit0/buildkit.go | buildctl build
+go run examples/buildkit0/buildkit.go \
+    | buildctl build
 ```
 
 `buildctl build` will show interactive progress bar by default while the build job is running. If the path to the trace file is specified, the trace file generated will contain all information about the timing of the individual steps and logs.
 
 Different versions of the example scripts show different ways of describing the build definition for this project to show the capabilities of the library. New versions have been added when new features have become available.
 
-- `./examples/buildkit0` - uses only exec operations, defines a full stage per component.
-- `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency.
-- `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching.
-- `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path`  
-- `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes
-- `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
-
+-   `./examples/buildkit0` - uses only exec operations, defines a full stage per component.
+-   `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency.
+-   `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching.
+-   `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path`
+-   `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes
+-   `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
 
 #### Exploring Dockerfiles
 
@@ -120,9 +120,18 @@ During development, Dockerfile frontend (dockerfile.v0) is also part of the Buil
 
 ##### Building a Dockerfile with `buildctl`
 
-```
-buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
-buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --opt target=foo --opt build-arg:foo=bar
+```bash
+buildctl build \
+    --frontend=dockerfile.v0 \
+    --local context=. \
+    --local dockerfile=.
+# or
+buildctl build \
+    --frontend=dockerfile.v0 \
+    --local context=. \
+    --local dockerfile=. \
+    --opt target=foo \
+    --opt build-arg:foo=bar
 ```
 
 `--local` exposes local source files from client to the builder. `context` and `dockerfile` are the names Dockerfile frontend looks for build context and Dockerfile location.
@@ -131,8 +140,9 @@ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. -
 
 For people familiar with `docker build` command, there is an example wrapper utility in `./examples/build-using-dockerfile` that allows building Dockerfiles with BuildKit using a syntax similar to `docker build`.
 
-```
-go build ./examples/build-using-dockerfile && sudo install build-using-dockerfile /usr/local/bin
+```bash
+go build ./examples/build-using-dockerfile \
+    && sudo install build-using-dockerfile /usr/local/bin
 
 build-using-dockerfile -t myimage .
 build-using-dockerfile -t mybuildkit -f ./hack/dockerfiles/test.Dockerfile .
@@ -145,10 +155,18 @@ docker inspect myimage
 
 External versions of the Dockerfile frontend are pushed to https://hub.docker.com/r/docker/dockerfile-upstream and https://hub.docker.com/r/docker/dockerfile and can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic build from master branch of this repository `docker/dockerfile-upsteam:master` or `docker/dockerfile-upstream:master-experimental` image can be used.
 
+```bash
+buildctl build \
+    --frontend gateway.v0 \
+    --opt source=docker/dockerfile \
+    --local context=. \
+    --local dockerfile=.
+buildctl build \
+    --frontend gateway.v0 \
+    --opt source=docker/dockerfile \
+    --opt context=git://github.com/moby/moby \
+    --opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
 ```
-buildctl build --frontend gateway.v0 --opt source=docker/dockerfile --local context=. --local dockerfile=.
-buildctl build --frontend gateway.v0 --opt source=docker/dockerfile --opt context=git://github.com/moby/moby --opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
-````
 
 ##### Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`
 
@@ -162,29 +180,29 @@ By default, the build result and intermediate cache will only remain internally
 
 The containerd worker needs to be used
 
-```
+```bash
 buildctl build ... --output type=image,name=docker.io/username/image
 ctr --namespace=buildkit images ls
 ```
 
 ##### Push resulting image to registry
 
-```
+```bash
 buildctl build ... --output type=image,name=docker.io/username/image,push=true
 ```
 
 If credentials are required, `buildctl` will attempt to read Docker configuration file.
 
-
 ##### Exporting build result back to client
 
 The local client will copy the files directly to the client. This is useful if BuildKit is being used for building something else than container images.
 
-```
+```bash
 buildctl build ... --output type=local,dest=path/to/output-dir
 ```
 
 To export specific files use multi-stage builds with a scratch stage and copy the needed files into that stage with `COPY --from`.
+
 ```dockerfile
 ...
 FROM scratch as testresult
@@ -193,28 +211,27 @@ COPY --from=builder /usr/src/app/testresult.xml .
 ...
 ```
 
-```
+```bash
 buildctl build ... --opt target=testresult --output type=local,dest=path/to/output-dir
 ```
 
 Tar exporter is similar to local exporter but transfers the files through a tarball.
 
-```
+```bash
 buildctl build ... --output type=tar,dest=out.tar
 buildctl build ... --output type=tar > out.tar
 ```
 
-
 ##### Exporting built image to Docker
 
-```
+```bash
 # exported tarball is also compatible with OCI spec
 buildctl build ... --output type=docker,name=myimage | docker load
 ```
 
 ##### Exporting [OCI Image Format](https://github.com/opencontainers/image-spec) tarball to client
 
-```
+```bash
 buildctl build ... --output type=oci,dest=path/to/output.tar
 buildctl build ... --output type=oci > output.tar
 ```
@@ -223,14 +240,14 @@ buildctl build ... --output type=oci > output.tar
 
 #### To/From registry
 
-```
+```bash
 buildctl build ... --export-cache type=registry,ref=localhost:5000/myrepo:buildcache
 buildctl build ... --import-cache type=registry,ref=localhost:5000/myrepo:buildcache
 ```
 
 #### To/From local filesystem
 
-```
+```bash
 buildctl build ... --export-cache type=local,dest=path/to/output-dir
 buildctl build ... --import-cache type=local,src=path/to/input-dir
 ```
@@ -238,27 +255,29 @@ buildctl build ... --import-cache type=local,src=path/to/input-dir
 The directory layout conforms to OCI Image Spec v1.0.
 
 #### `--export-cache` options
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps
-* `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
-* `dest=path/to/output-dir`: directory for `local` cache exporter
+
+-   `mode=min` (default): only export layers for the resulting image
+-   `mode=max`: export all the layers of all intermediate steps
+-   `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
+-   `dest=path/to/output-dir`: directory for `local` cache exporter
 
 #### `--import-cache` options
-* `ref=docker.io/user/image:tag`: reference for `registry` cache importer
-* `src=path/to/input-dir`: directory for `local` cache importer
-* `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer. Defaults to the digest of "latest" tag in `index.json`
+
+-   `ref=docker.io/user/image:tag`: reference for `registry` cache importer
+-   `src=path/to/input-dir`: directory for `local` cache importer
+-   `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer. Defaults to the digest of "latest" tag in `index.json`
 
 ### Other
 
 #### View build cache
 
-```
+```bash
 buildctl du -v
 ```
 
 #### Show enabled workers
 
-```
+```bash
 buildctl debug workers -v
 ```
 
@@ -268,14 +287,14 @@ BuildKit can also be used by running the `buildkitd` daemon inside a Docker cont
 
 We provide `buildkitd` container images as [`moby/buildkit`](https://hub.docker.com/r/moby/buildkit/tags/):
 
-* `moby/buildkit:latest`: built from the latest regular [release](https://github.com/moby/buildkit/releases)
-* `moby/buildkit:rootless`: same as `latest` but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
-* `moby/buildkit:master`: built from the master branch
-* `moby/buildkit:master-rootless`: same as master but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
+-   `moby/buildkit:latest`: built from the latest regular [release](https://github.com/moby/buildkit/releases)
+-   `moby/buildkit:rootless`: same as `latest` but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
+-   `moby/buildkit:master`: built from the master branch
+-   `moby/buildkit:master-rootless`: same as master but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
 
 To run daemon in a container:
 
-```
+```bash
 docker run -d --privileged -p 1234:1234 moby/buildkit:latest --addr tcp://0.0.0.0:1234
 export BUILDKIT_HOST=tcp://0.0.0.0:1234
 buildctl build --help
@@ -283,26 +302,50 @@ buildctl build --help
 
 To run client and an ephemeral daemon in a single container ("daemonless mode"):
 
+```bash
+docker run \
+    -it \
+    --rm \
+    --privileged \
+    -v /path/to/dir:/tmp/work \
+    --entrypoint buildctl-daemonless.sh \
+    moby/buildkit:master \
+        build \
+        --frontend dockerfile.v0 \
+        --local context=/tmp/work \
+        --local dockerfile=/tmp/work
 ```
-docker run -it --rm --privileged -v /path/to/dir:/tmp/work --entrypoint buildctl-daemonless.sh moby/buildkit:master build --frontend dockerfile.v0 --local context=/tmp/work --local dockerfile=/tmp/work
-```
+
 or
-```
-docker run -it --rm --security-opt seccomp=unconfined --security-opt apparmor=unconfined -e BUILDKITD_FLAGS=--oci-worker-no-process-sandbox -v /path/to/dir:/tmp/work --entrypoint buildctl-daemonless.sh moby/buildkit:master-rootless build --frontend dockerfile.v0 --local context=/tmp/work --local dockerfile=/tmp/work
-```
 
-The images can be also built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit).
-Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.
+```bash
+docker run \
+    -it \
+    --rm \
+    --security-opt seccomp=unconfined \
+    --security-opt apparmor=unconfined \
+    -e BUILDKITD_FLAGS=--oci-worker-no-process-sandbox \
+    -v /path/to/dir:/tmp/work \
+    --entrypoint buildctl-daemonless.sh \
+    moby/buildkit:master-rootless \
+        build \
+        --frontend \
+        dockerfile.v0 \
+        --local context=/tmp/work \
+        --local dockerfile=/tmp/work
+```
+
+The images can be also built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit). Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.
 
 #### Connection helpers
 
 If you are running `moby/buildkit:master` or `moby/buildkit:master-rootless` as a Docker/Kubernetes container, you can use special `BUILDKIT_HOST` URL for connecting to the BuildKit daemon in the container:
 
-```
-export BUILDKIT_HOST=docker://<container>
+```bash
+export BUILDKIT_HOST=docker-container://<container>
 ```
 
-```
+```bash
 export BUILDKIT_HOST=kube-pod://<pod>
 ```
 
@@ -310,15 +353,13 @@ export BUILDKIT_HOST=kube-pod://<pod>
 
 BuildKit supports opentracing for buildkitd gRPC API and buildctl commands. To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set `JAEGER_TRACE` environment variable to the collection address.
 
-
-```
+```bash
 docker run -d -p6831:6831/udp -p16686:16686 jaegertracing/all-in-one:latest
 export JAEGER_TRACE=0.0.0.0:6831
 # restart buildkitd and buildctl so they know JAEGER_TRACE
 # any buildctl command should be traced to http://127.0.0.1:16686/
 ```
 
-
 ### Supported runc version
 
 During development, BuildKit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/v1.2.1/RUNC.md) for more information.
@@ -329,5 +370,4 @@ Please refer to [`docs/rootless.md`](docs/rootless.md).
 
 ### Contributing
 
-Want to contribute to BuildKit? Awesome! You can find information about
-contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)
+Want to contribute to BuildKit? Awesome! You can find information about contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)

+ 18 - 7
vendor/github.com/moby/buildkit/cache/manager.go

@@ -36,6 +36,7 @@ type Accessor interface {
 	New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error)
 	GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase?
 	IdentityMapping() *idtools.IdentityMapping
+	Metadata(string) *metadata.StorageItem
 }
 
 type Controller interface {
@@ -124,6 +125,16 @@ func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts
 	return cm.get(ctx, id, true, opts...)
 }
 
+func (cm *cacheManager) Metadata(id string) *metadata.StorageItem {
+	cm.mu.Lock()
+	defer cm.mu.Unlock()
+	r, ok := cm.records[id]
+	if !ok {
+		return nil
+	}
+	return r.Metadata()
+}
+
 // get requires manager lock to be taken
 func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (*immutableRef, error) {
 	rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...)
@@ -157,14 +168,14 @@ func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool
 func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (cr *cacheRecord, retErr error) {
 	if rec, ok := cm.records[id]; ok {
 		if rec.isDead() {
-			return nil, errNotFound
+			return nil, errors.Wrapf(errNotFound, "failed to get dead record %s", id)
 		}
 		return rec, nil
 	}
 
 	md, ok := cm.md.Get(id)
 	if !ok && !fromSnapshotter {
-		return nil, errNotFound
+		return nil, errors.WithStack(errNotFound)
 	}
 	if mutableID := getEqualMutable(md); mutableID != "" {
 		mutable, err := cm.getRecord(ctx, mutableID, fromSnapshotter)
@@ -222,7 +233,7 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotte
 		if err := rec.remove(ctx, true); err != nil {
 			return nil, err
 		}
-		return nil, errNotFound
+		return nil, errors.Wrapf(errNotFound, "failed to get deleted record %s", id)
 	}
 
 	if err := initializeMetadata(rec, opts...); err != nil {
@@ -330,14 +341,14 @@ func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opt
 func (cm *cacheManager) pruneOnce(ctx context.Context, ch chan client.UsageInfo, opt client.PruneInfo) error {
 	filter, err := filters.ParseAll(opt.Filter...)
 	if err != nil {
-		return err
+		return errors.Wrapf(err, "failed to parse prune filters %v", opt.Filter)
 	}
 
 	var check ExternalRefChecker
 	if f := cm.PruneRefChecker; f != nil && (!opt.All || len(opt.Filter) > 0) {
 		c, err := f()
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		check = c
 	}
@@ -549,7 +560,7 @@ func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error {
 	}
 	c, err := cm.PruneRefChecker()
 	if err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 
 	var markAllParentsShared func(string)
@@ -590,7 +601,7 @@ type cacheUsageInfo struct {
 func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
 	filter, err := filters.ParseAll(opt.Filter...)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "failed to parse diskusage filters %v", opt.Filter)
 	}
 
 	cm.mu.Lock()
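
Note (not part of the diff): the point of replacing bare `errNotFound` returns with `errors.Wrapf(errNotFound, ...)` is that the message gains context while callers can still detect the sentinel. A minimal sketch with `github.com/pkg/errors`; `errNotFound` and `getRecord` below are stand-ins, not the actual manager code.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // stand-in for the package sentinel

func getRecord(id string) error {
	// wrapped like the diff does: context is added, the cause is preserved
	return errors.Wrapf(errNotFound, "failed to get dead record %s", id)
}

func main() {
	err := getRecord("sha256:deadbeef")
	fmt.Println(err)                              // failed to get dead record sha256:deadbeef: not found
	fmt.Println(errors.Cause(err) == errNotFound) // true: callers can still test the sentinel
}
```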

+ 39 - 27
vendor/github.com/moby/buildkit/cache/metadata/metadata.go

@@ -55,7 +55,7 @@ func (s *Store) All() ([]*StorageItem, error) {
 			return nil
 		})
 	})
-	return out, err
+	return out, errors.WithStack(err)
 }
 
 func (s *Store) Probe(index string) (bool, error) {
@@ -77,7 +77,7 @@ func (s *Store) Probe(index string) (bool, error) {
 		}
 		return nil
 	})
-	return exists, err
+	return exists, errors.WithStack(err)
 }
 
 func (s *Store) Search(index string) ([]*StorageItem, error) {
@@ -114,7 +114,7 @@ func (s *Store) Search(index string) ([]*StorageItem, error) {
 		}
 		return nil
 	})
-	return out, err
+	return out, errors.WithStack(err)
 }
 
 func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error {
@@ -132,7 +132,7 @@ func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error {
 }
 
 func (s *Store) Clear(id string) error {
-	return s.db.Update(func(tx *bolt.Tx) error {
+	return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error {
 		external := tx.Bucket([]byte(externalBucket))
 		if external != nil {
 			external.DeleteBucket([]byte(id))
@@ -160,21 +160,21 @@ func (s *Store) Clear(id string) error {
 			}
 		}
 		return main.DeleteBucket([]byte(id))
-	})
+	}))
 }
 
 func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error {
-	return s.db.Update(func(tx *bolt.Tx) error {
+	return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte(mainBucket))
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		b, err = b.CreateBucketIfNotExists([]byte(id))
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		return fn(b)
-	})
+	}))
 }
 
 func (s *Store) Get(id string) (*StorageItem, bool) {
@@ -200,7 +200,7 @@ func (s *Store) Get(id string) (*StorageItem, bool) {
 }
 
 func (s *Store) Close() error {
-	return s.db.Close()
+	return errors.WithStack(s.db.Close())
 }
 
 type StorageItem struct {
@@ -222,13 +222,13 @@ func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) {
 			var sv Value
 			if len(v) > 0 {
 				if err := json.Unmarshal(v, &sv); err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 				si.values[string(k)] = &sv
 			}
 			return nil
 		}); err != nil {
-			return si, err
+			return si, errors.WithStack(err)
 		}
 	}
 	return si, nil
@@ -250,6 +250,10 @@ func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error {
 	return s.storage.Update(s.id, fn)
 }
 
+func (s *StorageItem) Metadata() *StorageItem {
+	return s
+}
+
 func (s *StorageItem) Keys() []string {
 	keys := make([]string, 0, len(s.values))
 	for k := range s.values {
@@ -283,23 +287,23 @@ func (s *StorageItem) GetExternal(k string) ([]byte, error) {
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 	return dt, nil
 }
 
 func (s *StorageItem) SetExternal(k string, dt []byte) error {
-	return s.storage.db.Update(func(tx *bolt.Tx) error {
+	return errors.WithStack(s.storage.db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte(externalBucket))
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		b, err = b.CreateBucketIfNotExists([]byte(s.id))
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		return b.Put([]byte(k), dt)
-	})
+	}))
 }
 
 func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
@@ -311,15 +315,15 @@ func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
 func (s *StorageItem) Commit() error {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	return s.Update(func(b *bolt.Bucket) error {
+	return errors.WithStack(s.Update(func(b *bolt.Bucket) error {
 		for _, fn := range s.queue {
 			if err := fn(b); err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 		}
 		s.queue = s.queue[:0]
 		return nil
-	})
+	}))
 }
 
 func (s *StorageItem) Indexes() (out []string) {
@@ -333,6 +337,15 @@ func (s *StorageItem) Indexes() (out []string) {
 
 func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
 	if v == nil {
+		if old, ok := s.values[key]; ok {
+			if old.Index != "" {
+				b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
+				if err != nil {
+					return errors.WithStack(err)
+				}
+				b.Delete([]byte(indexKey(old.Index, s.ID()))) // ignore error
+			}
+		}
 		if err := b.Put([]byte(key), nil); err != nil {
 			return err
 		}
@@ -341,18 +354,18 @@ func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
 	}
 	dt, err := json.Marshal(v)
 	if err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 	if err := b.Put([]byte(key), dt); err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 	if v.Index != "" {
 		b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 	}
 	s.values[key] = v
@@ -367,14 +380,13 @@ type Value struct {
 func NewValue(v interface{}) (*Value, error) {
 	dt, err := json.Marshal(v)
 	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 	return &Value{Value: json.RawMessage(dt)}, nil
 }
 
 func (v *Value) Unmarshal(target interface{}) error {
-	err := json.Unmarshal(v.Value, target)
-	return err
+	return errors.WithStack(json.Unmarshal(v.Value, target))
 }
 
 func indexKey(index, target string) string {

+ 27 - 2
vendor/github.com/moby/buildkit/cache/refs.go

@@ -2,6 +2,7 @@ package cache
 
 import (
 	"context"
+	"strings"
 	"sync"
 
 	"github.com/containerd/containerd/mount"
@@ -190,7 +191,7 @@ func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
 	}
 	if removeSnapshot {
 		if err := cr.cm.Snapshotter.Remove(ctx, cr.ID()); err != nil {
-			return err
+			return errors.Wrapf(err, "failed to remove %s", cr.ID())
 		}
 	}
 	if err := cr.cm.md.Clear(cr.ID()); err != nil {
@@ -259,7 +260,7 @@ func (sr *immutableRef) release(ctx context.Context) error {
 	if len(sr.refs) == 0 {
 		if sr.viewMount != nil { // TODO: release viewMount earlier if possible
 			if err := sr.cm.Snapshotter.Remove(ctx, sr.view); err != nil {
-				return err
+				return errors.Wrapf(err, "failed to remove view %s", sr.view)
 			}
 			sr.view = ""
 			sr.viewMount = nil
@@ -429,6 +430,10 @@ func (m *readOnlyMounter) Mount() ([]mount.Mount, error) {
 		return nil, err
 	}
 	for i, m := range mounts {
+		if m.Type == "overlay" {
+			mounts[i].Options = readonlyOverlay(m.Options)
+			continue
+		}
 		opts := make([]string, 0, len(m.Options))
 		for _, opt := range m.Options {
 			if opt != "rw" {
@@ -440,3 +445,23 @@ func (m *readOnlyMounter) Mount() ([]mount.Mount, error) {
 	}
 	return mounts, nil
 }
+
+func readonlyOverlay(opt []string) []string {
+	out := make([]string, 0, len(opt))
+	upper := ""
+	for _, o := range opt {
+		if strings.HasPrefix(o, "upperdir=") {
+			upper = strings.TrimPrefix(o, "upperdir=")
+		} else if !strings.HasPrefix(o, "workdir=") {
+			out = append(out, o)
+		}
+	}
+	if upper != "" {
+		for i, o := range out {
+			if strings.HasPrefix(o, "lowerdir=") {
+				out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=")
+			}
+		}
+	}
+	return out
+}
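
Note (not part of the diff): an illustration of the rewrite `readonlyOverlay` performs on overlay mount options. The helper is copied from the hunk above so the snippet runs standalone; the directory paths are made up.

```go
package main

import (
	"fmt"
	"strings"
)

// copied from the hunk above for a standalone demo
func readonlyOverlay(opt []string) []string {
	out := make([]string, 0, len(opt))
	upper := ""
	for _, o := range opt {
		if strings.HasPrefix(o, "upperdir=") {
			upper = strings.TrimPrefix(o, "upperdir=")
		} else if !strings.HasPrefix(o, "workdir=") {
			out = append(out, o)
		}
	}
	if upper != "" {
		for i, o := range out {
			if strings.HasPrefix(o, "lowerdir=") {
				out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=")
			}
		}
	}
	return out
}

func main() {
	in := []string{
		"workdir=/var/lib/work",   // dropped: a read-only overlay needs no workdir
		"upperdir=/var/lib/upper", // dropped, but folded into lowerdir below
		"lowerdir=/var/lib/lower1:/var/lib/lower2",
	}
	fmt.Println(readonlyOverlay(in))
	// [lowerdir=/var/lib/upper:/var/lib/lower1:/var/lib/lower2]
}
```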

+ 8 - 8
vendor/github.com/moby/buildkit/cache/remotecache/import.go

@@ -100,7 +100,7 @@ func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descr
 			}
 		}
 	}
-	return dt, err
+	return dt, errors.WithStack(err)
 }
 
 func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte, id string, w worker.Worker) (solver.CacheManager, error) {
@@ -120,7 +120,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 				var m ocispec.Manifest
 
 				if err := json.Unmarshal(dt, &m); err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 
 				if m.Config.Digest == "" || len(m.Layers) == 0 {
@@ -129,13 +129,13 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 
 				p, err := content.ReadBlob(ctx, ci.provider, m.Config)
 				if err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 
 				var img image
 
 				if err := json.Unmarshal(p, &img); err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 
 				if len(img.Rootfs.DiffIDs) != len(m.Layers) {
@@ -149,7 +149,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 
 				var config v1.CacheConfig
 				if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 
 				createdDates, createdMsg, err := parseCreatedLayerInfo(img)
@@ -181,7 +181,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 
 				dt, err = json.Marshal(config)
 				if err != nil {
-					return err
+					return errors.WithStack(err)
 				}
 
 				mu.Lock()
@@ -217,7 +217,7 @@ func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt
 	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
 		var index ocispec.Index
 		if err := json.Unmarshal(dt, &index); err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 
 		for _, d := range index.Manifests {
@@ -226,7 +226,7 @@ func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt
 			}
 			p, err := content.ReadBlob(ctx, ci.provider, d)
 			if err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 			if err := ci.allDistributionManifests(ctx, p, m); err != nil {
 				return err

+ 1 - 1
vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go

@@ -254,7 +254,7 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult)
 
 	ref, err := cs.w.FromRemote(ctx, item.result)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to load result from remote")
 	}
 	return worker.NewWorkerRefResult(ref, cs.w), nil
 }

+ 1 - 1
vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go

@@ -12,7 +12,7 @@ import (
 func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
 	var config CacheConfig
 	if err := json.Unmarshal(configJSON, &config); err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 
 	return ParseConfig(config, provider, t)

+ 3 - 3
vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go

@@ -67,8 +67,8 @@ func sortConfig(cc *CacheConfig) {
 		if ri.Digest != rj.Digest {
 			return ri.Digest < rj.Digest
 		}
-		if len(ri.Inputs) != len(ri.Inputs) {
-			return len(ri.Inputs) < len(ri.Inputs)
+		if len(ri.Inputs) != len(rj.Inputs) {
+			return len(ri.Inputs) < len(rj.Inputs)
 		}
 		for i, inputs := range ri.Inputs {
 			if len(ri.Inputs[i]) != len(rj.Inputs[i]) {
@@ -76,7 +76,7 @@ func sortConfig(cc *CacheConfig) {
 			}
 			for j := range inputs {
 				if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
-					return ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector
+					return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
 				}
 				return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest
 			}

+ 7 - 7
vendor/github.com/moby/buildkit/cache/util/fsutil.go

@@ -61,23 +61,23 @@ func ReadFile(ctx context.Context, ref cache.ImmutableRef, req ReadRequest) ([]b
 	err := withMount(ctx, ref, func(root string) error {
 		fp, err := fs.RootPath(root, req.Filename)
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 
 		if req.Range == nil {
 			dt, err = ioutil.ReadFile(fp)
 			if err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 		} else {
 			f, err := os.Open(fp)
 			if err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 			dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
 			f.Close()
 			if err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 		}
 		return nil
@@ -101,7 +101,7 @@ func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([
 	err := withMount(ctx, ref, func(root string) error {
 		fp, err := fs.RootPath(root, req.Path)
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error {
 			if err != nil {
@@ -128,10 +128,10 @@ func StatFile(ctx context.Context, ref cache.ImmutableRef, path string) (*fstype
 	err := withMount(ctx, ref, func(root string) error {
 		fp, err := fs.RootPath(root, path)
 		if err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		if st, err = fsutil.Stat(fp); err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 		return nil
 	})

+ 10 - 4
vendor/github.com/moby/buildkit/client/llb/exec.go

@@ -427,11 +427,13 @@ func Security(s pb.SecurityMode) RunOption {
 }
 
 func Shlex(str string) RunOption {
-	return Shlexf(str)
+	return runOptionFunc(func(ei *ExecInfo) {
+		ei.State = shlexf(str, false)(ei.State)
+	})
 }
 func Shlexf(str string, v ...interface{}) RunOption {
 	return runOptionFunc(func(ei *ExecInfo) {
-		ei.State = shlexf(str, v...)(ei.State)
+		ei.State = shlexf(str, true, v...)(ei.State)
 	})
 }
 
@@ -442,7 +444,9 @@ func Args(a []string) RunOption {
 }
 
 func AddEnv(key, value string) RunOption {
-	return AddEnvf(key, value)
+	return runOptionFunc(func(ei *ExecInfo) {
+		ei.State = ei.State.AddEnv(key, value)
+	})
 }
 
 func AddEnvf(key, value string, v ...interface{}) RunOption {
@@ -458,7 +462,9 @@ func User(str string) RunOption {
 }
 
 func Dir(str string) RunOption {
-	return Dirf(str)
+	return runOptionFunc(func(ei *ExecInfo) {
+		ei.State = ei.State.Dir(str)
+	})
 }
 func Dirf(str string, v ...interface{}) RunOption {
 	return runOptionFunc(func(ei *ExecInfo) {

+ 15 - 7
vendor/github.com/moby/buildkit/client/llb/meta.go

@@ -24,19 +24,24 @@ var (
 	keySecurity  = contextKeyT("llb.security")
 )
 
-func addEnvf(key, value string, v ...interface{}) StateOption {
+func addEnvf(key, value string, replace bool, v ...interface{}) StateOption {
+	if replace {
+		value = fmt.Sprintf(value, v...)
+	}
 	return func(s State) State {
-		return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
+		return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, value))
 	}
 }
 
 func dir(str string) StateOption {
-	return dirf(str)
+	return dirf(str, false)
 }
 
-func dirf(str string, v ...interface{}) StateOption {
+func dirf(value string, replace bool, v ...interface{}) StateOption {
+	if replace {
+		value = fmt.Sprintf(value, v...)
+	}
 	return func(s State) State {
-		value := fmt.Sprintf(str, v...)
 		if !path.IsAbs(value) {
 			prev := getDir(s)
 			if prev == "" {
@@ -100,9 +105,12 @@ func args(args ...string) StateOption {
 	}
 }
 
-func shlexf(str string, v ...interface{}) StateOption {
+func shlexf(str string, replace bool, v ...interface{}) StateOption {
+	if replace {
+		str = fmt.Sprintf(str, v...)
+	}
 	return func(s State) State {
-		arg, err := shlex.Split(fmt.Sprintf(str, v...))
+		arg, err := shlex.Split(str)
 		if err != nil {
 			// TODO: handle error
 		}

+ 4 - 4
vendor/github.com/moby/buildkit/client/llb/state.go

@@ -240,18 +240,18 @@ func (s State) File(a *FileAction, opts ...ConstraintsOpt) State {
 }
 
 func (s State) AddEnv(key, value string) State {
-	return s.AddEnvf(key, value)
+	return addEnvf(key, value, false)(s)
 }
 
 func (s State) AddEnvf(key, value string, v ...interface{}) State {
-	return addEnvf(key, value, v...)(s)
+	return addEnvf(key, value, true, v...)(s)
 }
 
 func (s State) Dir(str string) State {
-	return s.Dirf(str)
+	return dirf(str, false)(s)
 }
 func (s State) Dirf(str string, v ...interface{}) State {
-	return dirf(str, v...)(s)
+	return dirf(str, true, v...)(s)
 }
 
 func (s State) GetEnv(key string) (string, bool) {
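
Note (not part of the diff): the user-visible effect of decoupling `Shlex`, `AddEnv` and `Dir` from their `...f` variants is that literal `%` characters are no longer mangled by `fmt.Sprintf` (previously `Shlex("date +%s")` became `date +%!s(MISSING)`). A small sketch against the `client/llb` API; the image reference and values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// literal % values now pass through untouched
	st := llb.Image("docker.io/library/busybox:latest").
		AddEnv("TIME_FORMAT", "%Y-%m-%d").
		Run(llb.Shlex(`date +%s`)).
		Root()

	v, _ := st.GetEnv("TIME_FORMAT")
	fmt.Println(v) // %Y-%m-%d
}
```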

+ 2 - 2
vendor/github.com/moby/buildkit/client/solve.go

@@ -46,8 +46,8 @@ type SolveOpt struct {
 type ExportEntry struct {
 	Type      string
 	Attrs     map[string]string
-	Output    io.WriteCloser // for ExporterOCI and ExporterDocker
-	OutputDir string         // for ExporterLocal
+	Output    func(map[string]string) (io.WriteCloser, error) // for ExporterOCI and ExporterDocker
+	OutputDir string                                          // for ExporterLocal
 }
 
 type CacheOptionsEntry struct {
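
Note (not part of the diff): `ExportEntry.Output` is now a callback that receives exporter metadata and returns the writer, instead of a pre-opened `io.WriteCloser`. A hedged sketch of how a caller might adapt; apart from the field names shown in the hunk, the surrounding code is illustrative.

```go
package main

import (
	"io"
	"os"

	"github.com/moby/buildkit/client"
)

// buildExports shows the shape callers now pass for OCI/Docker exports:
// the output is opened lazily, once the exporter provides its metadata,
// so no empty file is created if the build fails before the export phase.
func buildExports() []client.ExportEntry {
	return []client.ExportEntry{{
		Type: client.ExporterOCI,
		Output: func(meta map[string]string) (io.WriteCloser, error) {
			return os.Create("output.tar")
		},
	}}
}

func main() {
	_ = buildExports()
}
```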

+ 1 - 1
vendor/github.com/moby/buildkit/control/control.go

@@ -38,13 +38,13 @@ type Opt struct {
 }
 
 type Controller struct { // TODO: ControlService
+	buildCount       int64
 	opt              Opt
 	solver           *llbsolver.Solver
 	cache            solver.CacheManager
 	gatewayForwarder *controlgateway.GatewayForwarder
 	throttledGC      func()
 	gcmu             sync.Mutex
-	buildCount       int64
 }
 
 func NewController(opt Opt) (*Controller, error) {

+ 15 - 6
vendor/github.com/moby/buildkit/executor/oci/hosts.go

@@ -8,6 +8,7 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/identity"
 )
@@ -17,10 +18,10 @@ const hostsContent = `
 ::1	localhost ip6-localhost ip6-loopback
 `
 
-func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP) (string, func(), error) {
+func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) {
 	if len(extraHosts) == 0 {
 		_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
-			_, _, err := makeHostsFile(stateDir, nil)
+			_, _, err := makeHostsFile(stateDir, nil, idmap)
 			return nil, err
 		})
 		if err != nil {
@@ -28,10 +29,10 @@ func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.Ho
 		}
 		return filepath.Join(stateDir, "hosts"), func() {}, nil
 	}
-	return makeHostsFile(stateDir, extraHosts)
+	return makeHostsFile(stateDir, extraHosts, idmap)
 }
 
-func makeHostsFile(stateDir string, extraHosts []executor.HostIP) (string, func(), error) {
+func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) {
 	p := filepath.Join(stateDir, "hosts")
 	if len(extraHosts) != 0 {
 		p += "." + identity.NewID()
@@ -56,11 +57,19 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP) (string, func(
 		}
 	}
 
-	if err := ioutil.WriteFile(p+".tmp", b.Bytes(), 0644); err != nil {
+	tmpPath := p + ".tmp"
+	if err := ioutil.WriteFile(tmpPath, b.Bytes(), 0644); err != nil {
 		return "", nil, err
 	}
 
-	if err := os.Rename(p+".tmp", p); err != nil {
+	if idmap != nil {
+		root := idmap.RootPair()
+		if err := os.Chown(tmpPath, root.UID, root.GID); err != nil {
+			return "", nil, err
+		}
+	}
+
+	if err := os.Rename(tmpPath, p); err != nil {
 		return "", nil, err
 	}
 	return p, func() {

+ 47 - 6
vendor/github.com/moby/buildkit/executor/oci/resolvconf.go

@@ -6,7 +6,9 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/libnetwork/resolvconf"
+	"github.com/docker/libnetwork/types"
 	"github.com/moby/buildkit/util/flightcontrol"
 )
 
@@ -14,7 +16,13 @@ var g flightcontrol.Group
 var notFirstRun bool
 var lastNotEmpty bool
 
-func GetResolvConf(ctx context.Context, stateDir string) (string, error) {
+type DNSConfig struct {
+	Nameservers   []string
+	Options       []string
+	SearchDomains []string
+}
+
+func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) {
 	p := filepath.Join(stateDir, "resolv.conf")
 	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
 		generate := !notFirstRun
@@ -60,16 +68,49 @@ func GetResolvConf(ctx context.Context, stateDir string) (string, error) {
 			dt = f.Content
 		}
 
-		f, err = resolvconf.FilterResolvDNS(dt, true)
-		if err != nil {
-			return "", err
+		if dns != nil {
+			var (
+				dnsNameservers   = resolvconf.GetNameservers(dt, types.IP)
+				dnsSearchDomains = resolvconf.GetSearchDomains(dt)
+				dnsOptions       = resolvconf.GetOptions(dt)
+			)
+			if len(dns.Nameservers) > 0 {
+				dnsNameservers = dns.Nameservers
+			}
+			if len(dns.SearchDomains) > 0 {
+				dnsSearchDomains = dns.SearchDomains
+			}
+			if len(dns.Options) > 0 {
+				dnsOptions = dns.Options
+			}
+
+			f, err = resolvconf.Build(p+".tmp", dnsNameservers, dnsSearchDomains, dnsOptions)
+			if err != nil {
+				return "", err
+			}
+		} else {
+			// Logic seems odd here: why are we filtering localhost IPs
+			// only if neither of the DNS configs were specified?
+			// Logic comes from https://github.com/docker/libnetwork/blob/164a77ee6d24fb2b1d61f8ad3403a51d8453899e/sandbox_dns_unix.go#L230-L269
+			f, err = resolvconf.FilterResolvDNS(f.Content, true)
+			if err != nil {
+				return "", err
+			}
 		}
 
-		if err := ioutil.WriteFile(p+".tmp", f.Content, 0644); err != nil {
+		tmpPath := p + ".tmp"
+		if err := ioutil.WriteFile(tmpPath, f.Content, 0644); err != nil {
 			return "", err
 		}
 
-		if err := os.Rename(p+".tmp", p); err != nil {
+		if idmap != nil {
+			root := idmap.RootPair()
+			if err := os.Chown(tmpPath, root.UID, root.GID); err != nil {
+				return "", err
+			}
+		}
+
+		if err := os.Rename(tmpPath, p); err != nil {
 			return "", err
 		}
 		return "", nil

+ 13 - 0
vendor/github.com/moby/buildkit/executor/oci/spec.go

@@ -0,0 +1,13 @@
+package oci
+
+// ProcMode configures PID namespaces
+type ProcessMode int
+
+const (
+	// ProcessSandbox unshares pidns and mount procfs.
+	ProcessSandbox ProcessMode = iota
+	// NoProcessSandbox uses host pidns and bind-mount procfs.
+	// Note that NoProcessSandbox allows build containers to kill (and potentially ptrace) an arbitrary process in the BuildKit host namespace.
+	// NoProcessSandbox should be enabled only when the BuildKit is running in a container as an unprivileged user.
+	NoProcessSandbox
+)

+ 5 - 17
vendor/github.com/moby/buildkit/executor/oci/spec_unix.go

@@ -27,18 +27,6 @@ import (
 
 // Ideally we don't have to import whole containerd just for the default spec
 
-// ProcMode configures PID namespaces
-type ProcessMode int
-
-const (
-	// ProcessSandbox unshares pidns and mount procfs.
-	ProcessSandbox ProcessMode = iota
-	// NoProcessSandbox uses host pidns and bind-mount procfs.
-	// Note that NoProcessSandbox allows build containers to kill (and potentially ptrace) an arbitrary process in the BuildKit host namespace.
-	// NoProcessSandbox should be enabled only when the BuildKit is running in a container as an unprivileged user.
-	NoProcessSandbox
-)
-
 // GenerateSpec generates spec using containerd functionality.
 // opts are ignored for s.Process, s.Hostname, and s.Mounts .
 func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
@@ -113,11 +101,11 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 	}
 
 	if meta.SecurityMode == pb.SecurityMode_INSECURE {
-		//make sysfs rw mount for insecure mode.
-		for _, m := range s.Mounts {
-			if m.Type == "sysfs" {
-				m.Options = []string{"nosuid", "noexec", "nodev", "rw"}
-			}
+		if err = oci.WithWriteableCgroupfs(ctx, nil, c, s); err != nil {
+			return nil, nil, err
+		}
+		if err = oci.WithWriteableSysfs(ctx, nil, c, s); err != nil {
+			return nil, nil, err
 		}
 	}
 

+ 2 - 10
vendor/github.com/moby/buildkit/executor/oci/user.go

@@ -20,19 +20,11 @@ func GetUser(ctx context.Context, root, username string) (uint32, uint32, []uint
 		return uid, gid, nil, nil
 	}
 
-	passwdPath, err := user.GetPasswdPath()
-	if err != nil {
-		return 0, 0, nil, err
-	}
-	groupPath, err := user.GetGroupPath()
-	if err != nil {
-		return 0, 0, nil, err
-	}
-	passwdFile, err := openUserFile(root, passwdPath)
+	passwdFile, err := openUserFile(root, "/etc/passwd")
 	if err == nil {
 		defer passwdFile.Close()
 	}
-	groupFile, err := openUserFile(root, groupPath)
+	groupFile, err := openUserFile(root, "/etc/group")
 	if err == nil {
 		defer groupFile.Close()
 	}

+ 11 - 6
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go

@@ -43,6 +43,7 @@ type Opt struct {
 	IdentityMapping *idtools.IdentityMapping
 	// runc run --no-pivot (unrecommended)
 	NoPivot bool
+	DNS     *oci.DNSConfig
 }
 
 var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
@@ -57,6 +58,7 @@ type runcExecutor struct {
 	processMode      oci.ProcessMode
 	idmap            *idtools.IdentityMapping
 	noPivot          bool
+	dns              *oci.DNSConfig
 }
 
 func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
@@ -79,7 +81,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 
 	root := opt.Root
 
-	if err := os.MkdirAll(root, 0700); err != nil {
+	if err := os.MkdirAll(root, 0711); err != nil {
 		return nil, errors.Wrapf(err, "failed to create %s", root)
 	}
 
@@ -115,6 +117,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 		processMode:      opt.ProcessMode,
 		idmap:            opt.IdentityMapping,
 		noPivot:          opt.NoPivot,
+		dns:              opt.DNS,
 	}
 	return w, nil
 }
@@ -134,12 +137,12 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
 		logrus.Info("enabling HostNetworking")
 	}
 
-	resolvConf, err := oci.GetResolvConf(ctx, w.root)
+	resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns)
 	if err != nil {
 		return err
 	}
 
-	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts)
+	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap)
 	if err != nil {
 		return err
 	}
@@ -161,7 +164,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
 	id := identity.NewID()
 	bundle := filepath.Join(w.root, id)
 
-	if err := os.Mkdir(bundle, 0700); err != nil {
+	if err := os.Mkdir(bundle, 0711); err != nil {
 		return err
 	}
 	defer os.RemoveAll(bundle)
@@ -233,8 +236,10 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
 	if err != nil {
 		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
 	}
-	if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
-		return errors.Wrapf(err, "failed to create working directory %s", newp)
+	if _, err := os.Stat(newp); err != nil {
+		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
+			return errors.Wrapf(err, "failed to create working directory %s", newp)
+		}
 	}
 
 	if err := setOOMScoreAdj(spec); err != nil {
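
The executor now carries a DNS configuration and passes it, together with the identity mapping, into GetResolvConf and GetHostsFile. A minimal sketch of wiring it up when constructing the runc executor follows; the field names inside oci.DNSConfig and the nameserver values are assumptions, since only the Opt.DNS field itself appears in this hunk.

    package main

    import (
        "github.com/moby/buildkit/executor/oci"
        "github.com/moby/buildkit/executor/runcexecutor"
    )

    func main() {
        // DNSConfig field names and values below are illustrative assumptions;
        // only Opt.DNS is shown in the diff above.
        opt := runcexecutor.Opt{
            Root: "/var/lib/buildkit/executor",
            DNS: &oci.DNSConfig{
                Nameservers:   []string{"1.1.1.1"},
                SearchDomains: []string{"example.internal"},
            },
        }
        _ = opt
    }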

+ 1 - 1
vendor/github.com/moby/buildkit/exporter/tar/export.go

@@ -147,7 +147,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source)
 		fs = d.FS
 	}
 
-	w, err := filesync.CopyFileWriter(ctx, e.caller)
+	w, err := filesync.CopyFileWriter(ctx, nil, e.caller)
 	if err != nil {
 		return nil, err
 	}

+ 4 - 2
vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go

@@ -34,6 +34,7 @@ const (
 	keyFilename                = "filename"
 	keyCacheFrom               = "cache-from"    // for registry only. deprecated in favor of keyCacheImports
 	keyCacheImports            = "cache-imports" // JSON representation of []CacheOptionsEntry
+	keyCacheNS                 = "build-arg:BUILDKIT_CACHE_MOUNT_NS"
 	defaultDockerfileName      = "Dockerfile"
 	dockerignoreFilename       = ".dockerignore"
 	buildArgPrefix             = "build-arg:"
@@ -50,8 +51,8 @@ const (
 	keyContextSubDir           = "contextsubdir"
 )
 
-var httpPrefix = regexp.MustCompile("^https?://")
-var gitUrlPathWithFragmentSuffix = regexp.MustCompile("\\.git(?:#.+)?$")
+var httpPrefix = regexp.MustCompile(`^https?://`)
+var gitUrlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
 
 func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 	opts := c.BuildOpts().Opts
@@ -322,6 +323,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 					MetaResolver:      c,
 					BuildArgs:         filter(opts, buildArgPrefix),
 					Labels:            filter(opts, labelPrefix),
+					CacheIDNamespace:  opts[keyCacheNS],
 					SessionID:         c.BuildOpts().SessionID,
 					BuildContext:      buildContext,
 					Excludes:          excludes,
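
The new keyCacheNS option lets a caller namespace RUN cache mount IDs per build via the CacheIDNamespace conversion option. A rough sketch of passing it from a BuildKit client; the frontend attribute key is taken from this diff, while the SolveOpt wiring and the namespace value are assumptions.

    package main

    import "github.com/moby/buildkit/client"

    func main() {
        // "build-arg:BUILDKIT_CACHE_MOUNT_NS" is the key added above; the value is arbitrary.
        opt := client.SolveOpt{
            Frontend: "dockerfile.v0",
            FrontendAttrs: map[string]string{
                "build-arg:BUILDKIT_CACHE_MOUNT_NS": "ci-job-42",
            },
        }
        _ = opt
    }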

+ 19 - 28
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go

@@ -461,7 +461,7 @@ type dispatchOpt struct {
 func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
 	if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok {
 		err := ex.Expand(func(word string) (string, error) {
-			return opt.shlex.ProcessWordWithMap(word, toEnvMap(d.buildArgs, d.image.Config.Env))
+			return opt.shlex.ProcessWord(word, d.state.Env())
 		})
 		if err != nil {
 			return err
@@ -626,14 +626,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
 		args = withShell(d.image, args)
 	}
 	env := d.state.Env()
-	opt := []llb.RunOption{llb.Args(args)}
-	for _, arg := range d.buildArgs {
-		if arg.Value != nil {
-			env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
-			opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
-		}
-	}
-	opt = append(opt, dfCmd(c))
+	opt := []llb.RunOption{llb.Args(args), dfCmd(c)}
 	if d.ignoreCache {
 		opt = append(opt, llb.IgnoreCache)
 	}
@@ -647,6 +640,11 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
 	}
 	opt = append(opt, runMounts...)
 
+	err = dispatchRunSecurity(d, c)
+	if err != nil {
+		return err
+	}
+
 	shlex := *dopt.shlex
 	shlex.RawQuotes = true
 	shlex.SkipUnsetEnv = true
@@ -656,7 +654,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
 		opt = append(opt, llb.AddExtraHost(h.Host, h.IP))
 	}
 	d.state = d.state.Run(opt...).Root()
-	return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
+	return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state)
 }
 
 func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error {
@@ -927,7 +925,7 @@ func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) e
 func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
 	ports := []string{}
 	for _, p := range c.Ports {
-		ps, err := shlex.ProcessWordsWithMap(p, toEnvMap(d.buildArgs, d.image.Config.Env))
+		ps, err := shlex.ProcessWords(p, d.state.Env())
 		if err != nil {
 			return err
 		}
@@ -1000,6 +998,10 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru
 		}
 	}
 
+	if buildArg.Value != nil {
+		d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value)
+	}
+
 	d.buildArgs = append(d.buildArgs, buildArg)
 	return commitToHistory(&d.image, commitStr, false, nil)
 }
@@ -1065,21 +1067,6 @@ func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string
 	return kvpo
 }
 
-func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string {
-	m := shell.BuildEnvs(env)
-
-	for _, arg := range args {
-		// If key already exists, keep previous value.
-		if _, ok := m[arg.Key]; ok {
-			continue
-		}
-		if arg.Value != nil {
-			m[arg.Key] = arg.ValueString()
-		}
-	}
-	return m
-}
-
 func dfCmd(cmd interface{}) llb.ConstraintsOpt {
 	// TODO: add fmt.Stringer to instructions.Command to remove interface{}
 	var cmdStr string
@@ -1094,10 +1081,14 @@ func dfCmd(cmd interface{}) llb.ConstraintsOpt {
 	})
 }
 
-func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string {
+func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional, envMap map[string]string) string {
 	var tmpBuildEnv []string
 	for _, arg := range buildArgs {
-		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString())
+		v, ok := envMap[arg.Key]
+		if !ok {
+			v = arg.ValueString()
+		}
+		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+v)
 	}
 	if len(tmpBuildEnv) > 0 {
 		tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)

+ 11 - 0
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go

@@ -0,0 +1,11 @@
+// +build !dfrunsecurity
+
+package dockerfile2llb
+
+import (
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+)
+
+func dispatchRunSecurity(d *dispatchState, c *instructions.RunCommand) error {
+	return nil
+}

+ 6 - 1
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go

@@ -124,6 +124,9 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
 			if mount.CacheSharing == instructions.MountSharingLocked {
 				sharing = llb.CacheMountLocked
 			}
+			if mount.CacheID == "" {
+				mount.CacheID = path.Clean(mount.Target)
+			}
 			mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID, sharing))
 		}
 		target := mount.Target
@@ -144,7 +147,9 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
 
 		out = append(out, llb.AddMount(target, st, mountOpts...))
 
-		d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{}
+		if mount.From == "" {
+			d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{}
+		}
 	}
 	return out, nil
 }

+ 27 - 0
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go

@@ -0,0 +1,27 @@
+// +build dfrunsecurity
+
+package dockerfile2llb
+
+import (
+	"github.com/pkg/errors"
+
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+	"github.com/moby/buildkit/solver/pb"
+)
+
+func dispatchRunSecurity(d *dispatchState, c *instructions.RunCommand) error {
+	security := instructions.GetSecurity(c)
+
+	for _, sec := range security {
+		switch sec {
+		case instructions.SecurityInsecure:
+			d.state = d.state.Security(pb.SecurityMode_INSECURE)
+		case instructions.SecuritySandbox:
+			d.state = d.state.Security(pb.SecurityMode_SANDBOX)
+		default:
+			return errors.Errorf("unsupported security mode %q", sec)
+		}
+	}
+
+	return nil
+}
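
Outside the Dockerfile frontend, the same security mode can be set directly on an LLB state, mirroring what dispatchRunSecurity does here. A minimal sketch under that assumption (the image and command are arbitrary, and executing the result still requires the daemon to allow the corresponding entitlement):

    package main

    import (
        "github.com/moby/buildkit/client/llb"
        "github.com/moby/buildkit/solver/pb"
    )

    func main() {
        // Roughly equivalent to `RUN --security=insecure ...`; the executor then
        // mounts sysfs/cgroupfs writable as shown in the GenerateSpec change above.
        st := llb.Image("docker.io/library/alpine:latest").
            Security(pb.SecurityMode_INSECURE).
            Run(llb.Shlex("whoami")).
            Root()
        _ = st
    }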

+ 12 - 0
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go

@@ -142,6 +142,8 @@ func parseMount(value string) (*Mount, error) {
 				if m.Type == "secret" || m.Type == "ssh" {
 					m.Required = true
 					continue
+				} else {
+					return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type)
 				}
 			}
 		}
@@ -176,6 +178,16 @@ func parseMount(value string) (*Mount, error) {
 			}
 			m.ReadOnly = !rw
 			roAuto = false
+		case "required":
+			if m.Type == "secret" || m.Type == "ssh" {
+				v, err := strconv.ParseBool(value)
+				if err != nil {
+					return nil, errors.Errorf("invalid value for %s: %s", key, value)
+				}
+				m.Required = v
+			} else {
+				return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type)
+			}
 		case "id":
 			m.CacheID = value
 		case "sharing":

+ 83 - 0
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go

@@ -0,0 +1,83 @@
+// +build dfrunsecurity
+
+package instructions
+
+import (
+	"encoding/csv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	SecurityInsecure = "insecure"
+	SecuritySandbox  = "sandbox"
+)
+
+var allowedSecurity = map[string]struct{}{
+	SecurityInsecure: {},
+	SecuritySandbox:  {},
+}
+
+func isValidSecurity(value string) bool {
+	_, ok := allowedSecurity[value]
+	return ok
+}
+
+type securityKeyT string
+
+var securityKey = securityKeyT("dockerfile/run/security")
+
+func init() {
+	parseRunPreHooks = append(parseRunPreHooks, runSecurityPreHook)
+	parseRunPostHooks = append(parseRunPostHooks, runSecurityPostHook)
+}
+
+func runSecurityPreHook(cmd *RunCommand, req parseRequest) error {
+	st := &securityState{}
+	st.flag = req.flags.AddStrings("security")
+	cmd.setExternalValue(securityKey, st)
+	return nil
+}
+
+func runSecurityPostHook(cmd *RunCommand, req parseRequest) error {
+	st := getSecurityState(cmd)
+	if st == nil {
+		return errors.Errorf("no security state")
+	}
+
+	for _, value := range st.flag.StringValues {
+		csvReader := csv.NewReader(strings.NewReader(value))
+		fields, err := csvReader.Read()
+		if err != nil {
+			return errors.Wrap(err, "failed to parse csv security")
+		}
+
+		for _, field := range fields {
+			if !isValidSecurity(field) {
+				return errors.Errorf("security %q is not valid", field)
+			}
+
+			st.security = append(st.security, field)
+		}
+	}
+
+	return nil
+}
+
+func getSecurityState(cmd *RunCommand) *securityState {
+	v := cmd.getExternalValue(securityKey)
+	if v == nil {
+		return nil
+	}
+	return v.(*securityState)
+}
+
+func GetSecurity(cmd *RunCommand) []string {
+	return getSecurityState(cmd).security
+}
+
+type securityState struct {
+	flag     *Flag
+	security []string
+}

+ 1 - 4
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go

@@ -417,10 +417,7 @@ func BuildEnvs(env []string) map[string]string {
 			k := e[:i]
 			v := e[i+1:]
 
-			// If key already exists, keep previous value.
-			if _, ok := envs[k]; ok {
-				continue
-			}
+			// overwrite value if key already exists
 			envs[k] = v
 		}
 	}

+ 1 - 1
vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go

@@ -128,7 +128,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
 				}
 			}
 			if retError != nil {
-				st, _ := status.FromError(retError)
+				st, _ := status.FromError(errors.Cause(retError))
 				stp := st.Proto()
 				req.Error = &rpc.Status{
 					Code:    stp.Code,

+ 3 - 2
vendor/github.com/moby/buildkit/session/auth/auth.go

@@ -4,6 +4,7 @@ import (
 	"context"
 
 	"github.com/moby/buildkit/session"
+	"github.com/pkg/errors"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -16,10 +17,10 @@ func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string
 			Host: host,
 		})
 		if err != nil {
-			if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
+			if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
 				return "", "", nil
 			}
-			return "", "", err
+			return "", "", errors.WithStack(err)
 		}
 		return resp.Username, resp.Secret, nil
 	}

+ 16 - 9
vendor/github.com/moby/buildkit/session/content/caller.go

@@ -9,6 +9,7 @@ import (
 	"github.com/moby/buildkit/session"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 	"google.golang.org/grpc/metadata"
 )
 
@@ -31,47 +32,53 @@ func (cs *callerContentStore) choose(ctx context.Context) context.Context {
 
 func (cs *callerContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.Info(ctx, dgst)
+	info, err := cs.store.Info(ctx, dgst)
+	return info, errors.WithStack(err)
 }
 
 func (cs *callerContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.Update(ctx, info, fieldpaths...)
+	info, err := cs.store.Update(ctx, info, fieldpaths...)
+	return info, errors.WithStack(err)
 }
 
 func (cs *callerContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
 	ctx = cs.choose(ctx)
-	return cs.store.Walk(ctx, fn, fs...)
+	return errors.WithStack(cs.store.Walk(ctx, fn, fs...))
 }
 
 func (cs *callerContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
 	ctx = cs.choose(ctx)
-	return cs.store.Delete(ctx, dgst)
+	return errors.WithStack(cs.store.Delete(ctx, dgst))
 }
 
 func (cs *callerContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.ListStatuses(ctx, fs...)
+	resp, err := cs.store.ListStatuses(ctx, fs...)
+	return resp, errors.WithStack(err)
 }
 
 func (cs *callerContentStore) Status(ctx context.Context, ref string) (content.Status, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.Status(ctx, ref)
+	st, err := cs.store.Status(ctx, ref)
+	return st, errors.WithStack(err)
 }
 
 func (cs *callerContentStore) Abort(ctx context.Context, ref string) error {
 	ctx = cs.choose(ctx)
-	return cs.store.Abort(ctx, ref)
+	return errors.WithStack(cs.store.Abort(ctx, ref))
 }
 
 func (cs *callerContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.Writer(ctx, opts...)
+	w, err := cs.store.Writer(ctx, opts...)
+	return w, errors.WithStack(err)
 }
 
 func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
 	ctx = cs.choose(ctx)
-	return cs.store.ReaderAt(ctx, desc)
+	ra, err := cs.store.ReaderAt(ctx, desc)
+	return ra, errors.WithStack(err)
 }
 
 // NewCallerStore creates content.Store from session.Caller with specified storeID

+ 18 - 12
vendor/github.com/moby/buildkit/session/filesync/diffcopy.go

@@ -14,7 +14,7 @@ import (
 )
 
 func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error {
-	return fsutil.Send(stream.Context(), stream, fs, progress)
+	return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress))
 }
 
 func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {
@@ -29,7 +29,7 @@ type bufferedWriteCloser struct {
 
 func (bwc *bufferedWriteCloser) Close() error {
 	if err := bwc.Writer.Flush(); err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 	return bwc.Closer.Close()
 }
@@ -40,19 +40,25 @@ type streamWriterCloser struct {
 
 func (wc *streamWriterCloser) Write(dt []byte) (int, error) {
 	if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil {
-		return 0, err
+		// SendMsg returns EOF on remote errors
+		if errors.Cause(err) == io.EOF {
+			if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil {
+				return 0, err
+			}
+		}
+		return 0, errors.WithStack(err)
 	}
 	return len(dt), nil
 }
 
 func (wc *streamWriterCloser) Close() error {
 	if err := wc.ClientStream.CloseSend(); err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 	// block until receiver is done
 	var bm BytesMessage
 	if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF {
-		return err
+		return errors.WithStack(err)
 	}
 	return nil
 }
@@ -69,19 +75,19 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres
 		cf = cu.HandleChange
 		ch = cu.ContentHasher()
 	}
-	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+	return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
 		NotifyHashed:  cf,
 		ContentHasher: ch,
 		ProgressCb:    progress,
 		Filter:        fsutil.FilterFunc(filter),
-	})
+	}))
 }
 
 func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
 	if err := os.MkdirAll(dest, 0700); err != nil {
-		return err
+		return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest)
 	}
-	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+	return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
 		Merge: true,
 		Filter: func() func(string, *fstypes.Stat) bool {
 			uid := os.Getuid()
@@ -92,7 +98,7 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
 				return true
 			}
 		}(),
-	})
+	}))
 }
 
 func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
@@ -102,10 +108,10 @@ func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
 			if errors.Cause(err) == io.EOF {
 				return nil
 			}
-			return err
+			return errors.WithStack(err)
 		}
 		if _, err := wc.Write(bm.Data); err != nil {
-			return err
+			return errors.WithStack(err)
 		}
 	}
 }

+ 38 - 15
vendor/github.com/moby/buildkit/session/filesync/filesync.go

@@ -18,11 +18,12 @@ import (
 )
 
 const (
-	keyOverrideExcludes = "override-excludes"
-	keyIncludePatterns  = "include-patterns"
-	keyExcludePatterns  = "exclude-patterns"
-	keyFollowPaths      = "followpaths"
-	keyDirName          = "dir-name"
+	keyOverrideExcludes   = "override-excludes"
+	keyIncludePatterns    = "include-patterns"
+	keyExcludePatterns    = "exclude-patterns"
+	keyFollowPaths        = "followpaths"
+	keyDirName            = "dir-name"
+	keyExporterMetaPrefix = "exporter-md-"
 )
 
 type fsSyncProvider struct {
@@ -238,16 +239,16 @@ func NewFSSyncTargetDir(outdir string) session.Attachable {
 }
 
 // NewFSSyncTarget allows writing into an io.WriteCloser
-func NewFSSyncTarget(w io.WriteCloser) session.Attachable {
+func NewFSSyncTarget(f func(map[string]string) (io.WriteCloser, error)) session.Attachable {
 	p := &fsSyncTarget{
-		outfile: w,
+		f: f,
 	}
 	return p
 }
 
 type fsSyncTarget struct {
-	outdir  string
-	outfile io.WriteCloser
+	outdir string
+	f      func(map[string]string) (io.WriteCloser, error)
 }
 
 func (sp *fsSyncTarget) Register(server *grpc.Server) {
@@ -258,11 +259,26 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
 	if sp.outdir != "" {
 		return syncTargetDiffCopy(stream, sp.outdir)
 	}
-	if sp.outfile == nil {
+
+	if sp.f == nil {
 		return errors.New("empty outfile and outdir")
 	}
-	defer sp.outfile.Close()
-	return writeTargetFile(stream, sp.outfile)
+	opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object
+	md := map[string]string{}
+	for k, v := range opts {
+		if strings.HasPrefix(k, keyExporterMetaPrefix) {
+			md[strings.TrimPrefix(k, keyExporterMetaPrefix)] = strings.Join(v, ",")
+		}
+	}
+	wc, err := sp.f(md)
+	if err != nil {
+		return err
+	}
+	if wc == nil {
+		return status.Errorf(codes.AlreadyExists, "target already exists")
+	}
+	defer wc.Close()
+	return writeTargetFile(stream, wc)
 }
 
 func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress func(int, bool)) error {
@@ -275,13 +291,13 @@ func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress
 
 	cc, err := client.DiffCopy(ctx)
 	if err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 
 	return sendDiffCopy(cc, fs, progress)
 }
 
-func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {
+func CopyFileWriter(ctx context.Context, md map[string]string, c session.Caller) (io.WriteCloser, error) {
 	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
 	if !c.Supports(method) {
 		return nil, errors.Errorf("method %s not supported by the client", method)
@@ -289,9 +305,16 @@ func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, erro
 
 	client := NewFileSendClient(c.Conn())
 
+	opts := make(map[string][]string, len(md))
+	for k, v := range md {
+		opts[keyExporterMetaPrefix+k] = []string{v}
+	}
+
+	ctx = metadata.NewOutgoingContext(ctx, opts)
+
 	cc, err := client.DiffCopy(ctx)
 	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 
 	return newStreamWriter(cc), nil
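
NewFSSyncTarget now takes a callback that receives exporter metadata (sent by CopyFileWriter under the "exporter-md-" prefix) and may return a nil writer to signal that the target already exists. A small sketch of the client-side attachable; the metadata key and output path are hypothetical.

    package main

    import (
        "io"
        "os"

        "github.com/moby/buildkit/session/filesync"
    )

    func main() {
        attachable := filesync.NewFSSyncTarget(func(md map[string]string) (io.WriteCloser, error) {
            // "dest" is a hypothetical metadata key an exporter might pass via CopyFileWriter.
            name := md["dest"]
            if name == "" {
                name = "out.tar"
            }
            return os.Create("/tmp/" + name)
        })
        _ = attachable
    }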

+ 2 - 2
vendor/github.com/moby/buildkit/session/secrets/secrets.go

@@ -21,10 +21,10 @@ func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error)
 		ID: id,
 	})
 	if err != nil {
-		if st, ok := status.FromError(err); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) {
+		if st, ok := status.FromError(errors.Cause(err)); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) {
 			return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id)
 		}
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 	return resp.Data, nil
 }

+ 5 - 4
vendor/github.com/moby/buildkit/session/sshforward/copy.go

@@ -3,6 +3,7 @@ package sshforward
 import (
 	io "io"
 
+	"github.com/pkg/errors"
 	context "golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
@@ -19,7 +20,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) erro
 					return nil
 				}
 				conn.Close()
-				return err
+				return errors.WithStack(err)
 			}
 			select {
 			case <-ctx.Done():
@@ -29,7 +30,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) erro
 			}
 			if _, err := conn.Write(p.Data); err != nil {
 				conn.Close()
-				return err
+				return errors.WithStack(err)
 			}
 			p.Data = p.Data[:0]
 		}
@@ -43,7 +44,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) erro
 			case err == io.EOF:
 				return nil
 			case err != nil:
-				return err
+				return errors.WithStack(err)
 			}
 			select {
 			case <-ctx.Done():
@@ -52,7 +53,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) erro
 			}
 			p := &BytesMessage{Data: buf[:n]}
 			if err := stream.SendMsg(p); err != nil {
-				return err
+				return errors.WithStack(err)
 			}
 		}
 	})

+ 7 - 6
vendor/github.com/moby/buildkit/session/sshforward/ssh.go

@@ -7,6 +7,7 @@ import (
 	"path/filepath"
 
 	"github.com/moby/buildkit/session"
+	"github.com/pkg/errors"
 	context "golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc/metadata"
@@ -65,7 +66,7 @@ type SocketOpt struct {
 func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) {
 	dir, err := ioutil.TempDir("", ".buildkit-ssh-sock")
 	if err != nil {
-		return "", nil, err
+		return "", nil, errors.WithStack(err)
 	}
 
 	defer func() {
@@ -78,16 +79,16 @@ func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockP
 
 	l, err := net.Listen("unix", sockPath)
 	if err != nil {
-		return "", nil, err
+		return "", nil, errors.WithStack(err)
 	}
 
 	if err := os.Chown(sockPath, opt.UID, opt.GID); err != nil {
 		l.Close()
-		return "", nil, err
+		return "", nil, errors.WithStack(err)
 	}
 	if err := os.Chmod(sockPath, os.FileMode(opt.Mode)); err != nil {
 		l.Close()
-		return "", nil, err
+		return "", nil, errors.WithStack(err)
 	}
 
 	s := &server{caller: c}
@@ -102,12 +103,12 @@ func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockP
 	return sockPath, func() error {
 		err := l.Close()
 		os.RemoveAll(sockPath)
-		return err
+		return errors.WithStack(err)
 	}, nil
 }
 
 func CheckSSHID(ctx context.Context, c session.Caller, id string) error {
 	client := NewSSHClient(c.Conn())
 	_, err := client.CheckAgent(ctx, &CheckAgentRequest{ID: id})
-	return err
+	return errors.WithStack(err)
 }

+ 4 - 3
vendor/github.com/moby/buildkit/session/upload/upload.go

@@ -6,6 +6,7 @@ import (
 	"net/url"
 
 	"github.com/moby/buildkit/session"
+	"github.com/pkg/errors"
 	"google.golang.org/grpc/metadata"
 )
 
@@ -26,7 +27,7 @@ func New(ctx context.Context, c session.Caller, url *url.URL) (*Upload, error) {
 
 	cc, err := client.Pull(ctx)
 	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 
 	return &Upload{cc: cc}, nil
@@ -44,12 +45,12 @@ func (u *Upload) WriteTo(w io.Writer) (int, error) {
 			if err == io.EOF {
 				return n, nil
 			}
-			return n, err
+			return n, errors.WithStack(err)
 		}
 		nn, err := w.Write(bm.Data)
 		n += nn
 		if err != nil {
-			return n, err
+			return n, errors.WithStack(err)
 		}
 	}
 }

+ 6 - 4
vendor/github.com/moby/buildkit/solver/edge.go

@@ -331,7 +331,8 @@ func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver,
 	if e.cacheMapReq == nil && (e.cacheMap == nil || len(e.cacheRecords) == 0) {
 		index := e.cacheMapIndex
 		e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) {
-			return e.op.CacheMap(ctx, index)
+			cm, err := e.op.CacheMap(ctx, index)
+			return cm, errors.Wrap(err, "failed to load cache key")
 		})
 		cacheMapReq = true
 	}
@@ -798,7 +799,8 @@ func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory,
 			res := dep.result
 			func(fn ResultBasedCacheFunc, res Result, index Index) {
 				dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) {
-					return e.op.CalcSlowCache(ctx, index, fn, res)
+					v, err := e.op.CalcSlowCache(ctx, index, fn, res)
+					return v, errors.Wrap(err, "failed to compute cache key")
 				})
 			}(fn, res, dep.index)
 			addedNew = true
@@ -850,7 +852,7 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
 	logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID)
 	res, err := e.op.LoadCache(ctx, rec)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to load cache")
 	}
 
 	return NewCachedResult(res, []ExportableCacheKey{{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}}), nil
@@ -861,7 +863,7 @@ func (e *edge) execOp(ctx context.Context) (interface{}, error) {
 	cacheKeys, inputs := e.commitOptions()
 	results, subExporters, err := e.op.Exec(ctx, toResultSlice(inputs))
 	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}
 
 	index := e.edge.Index

+ 19 - 4
vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go

@@ -29,6 +29,7 @@ type llbBridge struct {
 	builder                   solver.Builder
 	frontends                 map[string]frontend.Frontend
 	resolveWorker             func() (worker.Worker, error)
+	eachWorker                func(func(worker.Worker) error) error
 	resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
 	cms                       map[string]solver.CacheManager
 	cmsMu                     sync.Mutex
@@ -91,14 +92,28 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
 		if err != nil {
 			return nil, err
 		}
+		dpc := &detectPrunedCacheID{}
 
-		edge, err := Load(req.Definition, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
+		edge, err := Load(req.Definition, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "failed to load LLB")
+		}
+
+		if len(dpc.ids) > 0 {
+			ids := make([]string, 0, len(dpc.ids))
+			for id := range dpc.ids {
+				ids = append(ids, id)
+			}
+			if err := b.eachWorker(func(w worker.Worker) error {
+				return w.PruneCacheMounts(ctx, ids)
+			}); err != nil {
+				return nil, err
+			}
 		}
+
 		ref, err := b.builder.Build(ctx, edge)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "failed to build LLB")
 		}
 
 		res = &frontend.Result{Ref: ref}
@@ -109,7 +124,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
 		}
 		res, err = f.Solve(ctx, b, req.FrontendOpt)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
 		}
 	} else {
 		return &frontend.Result{}, nil

+ 1 - 6
vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go

@@ -27,13 +27,9 @@ func timestampToTime(ts int64) *time.Time {
 }
 
 func mapUser(user *copy.ChownOpt, idmap *idtools.IdentityMapping) (*copy.ChownOpt, error) {
-	if idmap == nil {
+	if idmap == nil || user == nil {
 		return user, nil
 	}
-	if user == nil {
-		identity := idmap.RootPair()
-		return &copy.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
-	}
 	identity, err := idmap.ToHost(idtools.Identity{
 		UID: user.Uid,
 		GID: user.Gid,
@@ -138,7 +134,6 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 		return nil
 	}
 
-	// TODO(tonistiigi): this is wrong. fsutil.Copy can't handle non-forced user
 	u, err := mapUser(u, idmap)
 	if err != nil {
 		return err

+ 4 - 0
vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go

@@ -10,6 +10,7 @@ import (
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/worker"
 	digest "github.com/opencontainers/go-digest"
@@ -25,6 +26,9 @@ type buildOp struct {
 }
 
 func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) {
+	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
+		return nil, err
+	}
 	return &buildOp{
 		op: op.Build,
 		b:  b,

+ 17 - 5
vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go

@@ -60,6 +60,9 @@ type execOp struct {
 }
 
 func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
+	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
+		return nil, err
+	}
 	return &execOp{
 		op:          op.Exec,
 		cm:          cm,
@@ -218,11 +221,13 @@ func (e *execOp) getMountDeps() ([]dep, error) {
 }
 
 func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
-
 	key := "cache-dir:" + id
 	if ref != nil {
 		key += ":" + ref.ID()
 	}
+	mu := CacheMountsLocker()
+	mu.Lock()
+	defer mu.Unlock()
 
 	if ref, ok := e.cacheMounts[key]; ok {
 		return ref.clone(), nil
@@ -324,7 +329,7 @@ func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mounta
 		if m.SSHOpt.Optional {
 			return nil, nil
 		}
-		if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
+		if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
 			return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
 		}
 		return nil, err
@@ -789,10 +794,17 @@ type cacheRefs struct {
 	shares map[string]*cacheRefShare
 }
 
-func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
+// ClearActiveCacheMounts clears shared cache mounts currently in use.
+// Caller needs to hold CacheMountsLocker before calling
+func ClearActiveCacheMounts() {
+	sharedCacheRefs.shares = nil
+}
 
+func CacheMountsLocker() sync.Locker {
+	return &sharedCacheRefs.mu
+}
+
+func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
 	if r.shares == nil {
 		r.shares = map[string]*cacheRefShare{}
 	}
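
The newly exported locker and ClearActiveCacheMounts let code outside the package reset the shared cache-mount registry, for example when pruning cache mounts. A minimal sketch of the documented locking contract:

    package main

    import "github.com/moby/buildkit/solver/llbsolver/ops"

    func main() {
        // Per the comment above: hold CacheMountsLocker before clearing shared cache mounts.
        mu := ops.CacheMountsLocker()
        mu.Lock()
        ops.ClearActiveCacheMounts()
        mu.Unlock()
    }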

+ 3 - 0
vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go

@@ -35,6 +35,9 @@ type fileOp struct {
 }
 
 func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) {
+	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
+		return nil, err
+	}
 	return &fileOp{
 		op:        op.File,
 		md:        md,

+ 4 - 0
vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/worker"
@@ -26,6 +27,9 @@ type sourceOp struct {
 }
 
 func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) {
+	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
+		return nil, err
+	}
 	return &sourceOp{
 		op:       op,
 		sm:       sm,

+ 17 - 0
vendor/github.com/moby/buildkit/solver/llbsolver/solver.go

@@ -39,6 +39,7 @@ type Solver struct {
 	workerController          *worker.Controller
 	solver                    *solver.Solver
 	resolveWorker             ResolveWorkerFunc
+	eachWorker                func(func(worker.Worker) error) error
 	frontends                 map[string]frontend.Frontend
 	resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
 	platforms                 []specs.Platform
@@ -51,6 +52,7 @@ func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.Cac
 	s := &Solver{
 		workerController:          wc,
 		resolveWorker:             defaultResolver(wc),
+		eachWorker:                allWorkers(wc),
 		frontends:                 f,
 		resolveCacheImporterFuncs: resolveCI,
 		gatewayForwarder:          gatewayForwarder,
@@ -87,6 +89,7 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
 		builder:                   b,
 		frontends:                 s.frontends,
 		resolveWorker:             s.resolveWorker,
+		eachWorker:                s.eachWorker,
 		resolveCacheImporterFuncs: s.resolveCacheImporterFuncs,
 		cms:                       map[string]solver.CacheManager{},
 		platforms:                 s.platforms,
@@ -285,6 +288,20 @@ func defaultResolver(wc *worker.Controller) ResolveWorkerFunc {
 		return wc.GetDefault()
 	}
 }
+func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error {
+	return func(f func(worker.Worker) error) error {
+		all, err := wc.List()
+		if err != nil {
+			return err
+		}
+		for _, w := range all {
+			if err := f(w); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+}
 
 func oneOffProgress(ctx context.Context, id string) func(err error) error {
 	pw, _, _ := progress.FromContext(ctx)

+ 89 - 0
vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go

@@ -131,6 +131,34 @@ func ValidateEntitlements(ent entitlements.Set) LoadOpt {
 	}
 }
 
+type detectPrunedCacheID struct {
+	ids map[string]struct{}
+}
+
+func (dpc *detectPrunedCacheID) Load(op *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {
+	if md == nil || !md.IgnoreCache {
+		return nil
+	}
+	switch op := op.Op.(type) {
+	case *pb.Op_Exec:
+		for _, m := range op.Exec.GetMounts() {
+			if m.MountType == pb.MountType_CACHE {
+				if m.CacheOpt != nil {
+					id := m.CacheOpt.ID
+					if id == "" {
+						id = m.Dest
+					}
+					if dpc.ids == nil {
+						dpc.ids = map[string]struct{}{}
+					}
+					dpc.ids[id] = struct{}{}
+				}
+			}
+		}
+	}
+	return nil
+}
+
 func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) {
 	return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) {
 		opMetadata := def.Metadata[dgst]
@@ -188,8 +216,15 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige
 		allOps[dgst] = &op
 	}
 
+	if len(allOps) < 2 {
+		return solver.Edge{}, errors.Errorf("invalid LLB with %d vertexes", len(allOps))
+	}
+
 	lastOp := allOps[dgst]
 	delete(allOps, dgst)
+	if len(lastOp.Inputs) == 0 {
+		return solver.Edge{}, errors.Errorf("invalid LLB with no inputs on last vertex")
+	}
 	dgst = lastOp.Inputs[0].Digest
 
 	cache := make(map[digest.Digest]solver.Vertex)
@@ -203,6 +238,11 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige
 		if !ok {
 			return nil, errors.Errorf("invalid missing input digest %s", dgst)
 		}
+
+		if err := ValidateOp(op); err != nil {
+			return nil, err
+		}
+
 		v, err := fn(dgst, op, rec)
 		if err != nil {
 			return nil, err
@@ -240,6 +280,55 @@ func llbOpName(op *pb.Op) string {
 	}
 }
 
+func ValidateOp(op *pb.Op) error {
+	if op == nil {
+		return errors.Errorf("invalid nil op")
+	}
+
+	switch op := op.Op.(type) {
+	case *pb.Op_Source:
+		if op.Source == nil {
+			return errors.Errorf("invalid nil source op")
+		}
+	case *pb.Op_Exec:
+		if op.Exec == nil {
+			return errors.Errorf("invalid nil exec op")
+		}
+		if op.Exec.Meta == nil {
+			return errors.Errorf("invalid exec op with no meta")
+		}
+		if len(op.Exec.Meta.Args) == 0 {
+			return errors.Errorf("invalid exec op with no args")
+		}
+		if len(op.Exec.Mounts) == 0 {
+			return errors.Errorf("invalid exec op with no mounts")
+		}
+
+		isRoot := false
+		for _, m := range op.Exec.Mounts {
+			if m.Dest == pb.RootMount {
+				isRoot = true
+				break
+			}
+		}
+		if !isRoot {
+			return errors.Errorf("invalid exec op with no rootfs")
+		}
+	case *pb.Op_File:
+		if op.File == nil {
+			return errors.Errorf("invalid nil file op")
+		}
+		if len(op.File.Actions) == 0 {
+			return errors.Errorf("invalid file op with no actions")
+		}
+	case *pb.Op_Build:
+		if op.Build == nil {
+			return errors.Errorf("invalid nil build op")
+		}
+	}
+	return nil
+}
+
 func fileOpName(actions []*pb.FileAction) string {
 	names := make([]string, 0, len(actions))
 	for _, action := range actions {
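
ValidateOp rejects structurally empty ops both during LLB loading and in the NewExecOp/NewFileOp/NewSourceOp/NewBuildOp constructors above. A brief illustrative check:

    package main

    import (
        "fmt"

        "github.com/moby/buildkit/solver/llbsolver"
        "github.com/moby/buildkit/solver/pb"
    )

    func main() {
        // An exec op without meta, args or mounts fails validation.
        op := &pb.Op{Op: &pb.Op_Exec{Exec: &pb.ExecOp{}}}
        if err := llbsolver.ValidateOp(op); err != nil {
            fmt.Println(err) // "invalid exec op with no meta"
        }
    }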

+ 1 - 1
vendor/github.com/moby/buildkit/solver/result.go

@@ -40,9 +40,9 @@ func dup(res Result) (Result, Result) {
 }
 
 type splitResult struct {
-	Result
 	released int64
 	sem      *int64
+	Result
 }
 
 func (r *splitResult) Release(ctx context.Context) error {

+ 8 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go

@@ -0,0 +1,8 @@
+// +build !386
+
+package binfmt_misc
+
+// This file is generated by running make inside the binfmt_misc package.
+// Do not edit manually.
+
+const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x6e\xc2\x30\x14\x06\xe0\xdf\x8d\xdb\x26\x6a\x07\x1f\x20\xaa\x3a\x74\xe8\x64\xb5\x52\xae\x00\x2c\x88\x8d\x03\x80\x14\xc1\x94\x44\x89\x91\x60\x22\x47\x60\xe0\x20\x8c\x8c\x5c\x80\x13\x70\x19\xf4\xe2\x67\x91\x81\x25\xfb\xfb\xa4\x5f\x16\xcf\xe6\x29\xeb\x7b\xfb\xd1\x74\xac\x94\x42\xf0\x82\x08\xdd\xaf\x83\x8e\x33\x00\x7f\xc6\xd7\x33\x7c\x23\xc2\x2f\x74\xb8\x27\xad\x8e\x29\x27\x00\x14\x4d\x35\x03\x7f\x6f\x7c\x0f\x4a\x02\x80\xf2\xca\x75\x7a\x77\xa4\xb4\x3a\xa6\xa4\x00\x52\xfe\x7f\xc8\x27\xbf\x9f\xcc\xe6\xd4\xef\x42\xb5\xc7\x57\x0a\x21\x84\x10\x42\x08\x21\x84\x10\x62\x88\x33\x0d\xd5\xff\xb7\x6b\x0b\xdb\xac\x1b\x57\xbb\xc5\x12\xb6\x28\x5d\x6e\x57\xc5\xc6\x56\x75\x59\xe5\xb5\xdb\xc1\xba\x7c\xeb\x86\xf4\xfd\x00\xf0\xde\xed\x13\x78\xce\xe7\x19\x3f\xd0\x7c\x7e\xf1\x5c\xff\xc6\x3b\x07\x18\xbf\x2b\x08\x54\xef\x8c\x7a\xf5\xc4\x00\x3f\x4f\xde\xdd\x03\x00\x00\xff\xff\x8d\xf7\xd2\x72\xd0\x10\x00\x00"

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go

@@ -0,0 +1,7 @@
+// +build !386
+
+package binfmt_misc
+
+func i386Supported() error {
+	return check(Binary386)
+}

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go

@@ -0,0 +1,7 @@
+// +build 386
+
+package binfmt_misc
+
+func i386Supported() error {
+	return nil
+}

+ 24 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go

@@ -24,6 +24,15 @@ func SupportedPlatforms() []string {
 		if p := "linux/riscv64"; def != p && riscv64Supported() == nil {
 			arr = append(arr, p)
 		}
+		if p := "linux/ppc64le"; def != p && ppc64leSupported() == nil {
+			arr = append(arr, p)
+		}
+		if p := "linux/s390x"; def != p && s390xSupported() == nil {
+			arr = append(arr, p)
+		}
+		if p := "linux/386"; def != p && i386Supported() == nil {
+			arr = append(arr, p)
+		}
 		if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
 			arr = append(arr, "linux/arm/v7", "linux/arm/v6")
 		} else if def == "linux/arm/v7" {
@@ -55,6 +64,21 @@ func WarnIfUnsupported(pfs []string) {
 					printPlatfromWarning(p, err)
 				}
 			}
+			if p == "linux/ppc64le" {
+				if err := ppc64leSupported(); err != nil {
+					printPlatfromWarning(p, err)
+				}
+			}
+			if p == "linux/s390x" {
+				if err := s390xSupported(); err != nil {
+					printPlatfromWarning(p, err)
+				}
+			}
+			if p == "linux/386" {
+				if err := i386Supported(); err != nil {
+					printPlatfromWarning(p, err)
+				}
+			}
 			if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
 				if err := armSupported(); err != nil {
 					printPlatfromWarning(p, err)
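
With the new probes, emulated ppc64le, s390x and 386 targets are reported alongside the existing ones when the host has the matching binfmt_misc handlers registered. A quick sketch of listing them:

    package main

    import (
        "fmt"

        "github.com/moby/buildkit/util/binfmt_misc"
    )

    func main() {
        // Prints e.g. linux/amd64, linux/386, linux/arm64, ... depending on the
        // binfmt_misc handlers registered on the host.
        for _, p := range binfmt_misc.SupportedPlatforms() {
            fmt.Println(p)
        }
    }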

+ 8 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go

@@ -0,0 +1,8 @@
+// +build !ppc64le
+
+package binfmt_misc
+
+// This file is generated by running make inside the binfmt_misc package.
+// Do not edit manually.
+
+const Binaryppc64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x51\x06\x10\xaf\x82\x81\x41\x00\xc4\x77\x80\x8a\x2f\x80\xcb\x83\xc4\x2c\x18\x18\x19\x1c\x18\x58\x18\x98\xc1\x6a\x59\x19\x50\x80\x00\x32\xdd\x02\xe5\xb4\xc0\xa5\x19\x61\xa4\x05\x03\x43\x82\x05\x13\x03\x83\x0b\x83\x5e\x71\x46\x71\x49\x51\x49\x62\x12\x83\x5e\x49\x6a\x45\x09\x83\x5e\x6a\x46\x7c\x5a\x51\x62\x6e\x2a\x03\xc5\x80\x1b\x6a\x23\x1b\x94\x0f\xf3\x57\x05\x94\xcf\x83\xa6\x9e\x03\x8d\x2f\x08\xd5\xcf\x84\xf0\x87\x00\xaa\x7f\x50\x01\x0b\x1a\x1f\xa4\x97\x19\x8b\x3a\x98\x7e\x69\x2c\xea\x91\x01\x20\x00\x00\xff\xff\xce\xf7\x15\x75\xa0\x01\x00\x00"

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go

@@ -0,0 +1,7 @@
+// +build !ppc64le
+
+package binfmt_misc
+
+func ppc64leSupported() error {
+	return check(Binaryppc64le)
+}

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go

@@ -0,0 +1,7 @@
+// +build ppc64le
+
+package binfmt_misc
+
+func ppc64leSupported() error {
+	return nil
+}

+ 8 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go

@@ -0,0 +1,8 @@
+// +build !s390x
+
+package binfmt_misc
+
+// This file is generated by running make inside the binfmt_misc package.
+// Do not edit manually.
+
+const Binarys390x = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x62\x64\x80\x03\x26\x06\x31\x06\x06\x06\xb0\x00\x23\x03\x43\x05\x54\xd4\x01\x4a\xcf\x80\xf2\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xa0\x6a\x59\x19\x90\x00\x23\x1a\xcd\xc0\xc0\xd0\x80\x4a\x0b\x30\x2c\xd7\x64\x60\xe0\x62\x64\x67\x67\xd0\x2b\xce\x28\x2e\x29\x2a\x49\x4c\x62\xd0\x2b\x49\xad\x28\x61\xa0\x1e\xe0\x46\x72\x02\x1b\x9a\x7f\x60\x34\x07\x9a\x1e\x16\x34\x6f\x30\xe3\x30\x1b\xe6\x1f\x41\x34\x71\xb8\x97\x01\x01\x00\x00\xff\xff\x0c\x76\x9a\xe1\x58\x01\x00\x00"

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go

@@ -0,0 +1,7 @@
+// +build !s390x
+
+package binfmt_misc
+
+func s390xSupported() error {
+	return check(Binarys390x)
+}

+ 7 - 0
vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go

@@ -0,0 +1,7 @@
+// +build s390x
+
+package binfmt_misc
+
+func s390xSupported() error {
+	return nil
+}

+ 8 - 4
vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go

@@ -72,16 +72,18 @@ func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context)
 		g.mu.Lock()
 		delete(g.m, key)
 		g.mu.Unlock()
+		close(c.cleaned)
 	}()
 	g.mu.Unlock()
 	return c.wait(ctx)
 }
 
 type call struct {
-	mu     sync.Mutex
-	result interface{}
-	err    error
-	ready  chan struct{}
+	mu      sync.Mutex
+	result  interface{}
+	err     error
+	ready   chan struct{}
+	cleaned chan struct{}
 
 	ctx  *sharedContext
 	ctxs []context.Context
@@ -97,6 +99,7 @@ func newCall(fn func(ctx context.Context) (interface{}, error)) *call {
 	c := &call{
 		fn:            fn,
 		ready:         make(chan struct{}),
+		cleaned:       make(chan struct{}),
 		progressState: newProgressState(),
 	}
 	ctx := newContext(c) // newSharedContext
@@ -127,6 +130,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) {
 	select {
 	case <-c.ready: // could return if no error
 		c.mu.Unlock()
+		<-c.cleaned
 		return nil, errRetry
 	default:
 	}
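
The added cleaned channel makes a caller that sees a ready-but-not-yet-removed call wait for the map cleanup before returning errRetry, so the retried call installs a fresh entry instead of racing the old one. Typical usage is unchanged; a minimal sketch, assuming the exported Do wrapper keeps its current signature:

    package main

    import (
        "context"
        "fmt"

        "github.com/moby/buildkit/util/flightcontrol"
    )

    func main() {
        var g flightcontrol.Group
        // Concurrent Do calls with the same key share a single execution of fn.
        v, err := g.Do(context.TODO(), "example-key", func(ctx context.Context) (interface{}, error) {
            return "result", nil
        })
        fmt.Println(v, err)
    }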

+ 0 - 18
vendor/github.com/moby/buildkit/util/network/network.go

@@ -3,20 +3,9 @@ package network
 import (
 	"io"
 
-	"github.com/moby/buildkit/solver/pb"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-// Default returns the default network provider set
-func Default() map[pb.NetMode]Provider {
-	return map[pb.NetMode]Provider{
-		// FIXME: still uses host if no provider configured
-		pb.NetMode_UNSET: NewHostProvider(),
-		pb.NetMode_HOST:  NewHostProvider(),
-		pb.NetMode_NONE:  NewNoneProvider(),
-	}
-}
-
 // Provider interface for Network
 type Provider interface {
 	New() (Namespace, error)
@@ -28,10 +17,3 @@ type Namespace interface {
 	// Set the namespace on the spec
 	Set(*specs.Spec)
 }
-
-// NetworkOpts hold network options
-type NetworkOpts struct {
-	Type          string
-	CNIConfigPath string
-	CNIPluginPath string
-}

+ 1 - 0
vendor/github.com/moby/buildkit/worker/worker.go

@@ -33,6 +33,7 @@ type Worker interface {
 	Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error
 	GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error)
 	FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error)
+	PruneCacheMounts(ctx context.Context, ids []string) error
 }
 
 // Pre-defined label keys