
vendor: update buildkit to d75ed2b68

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi, 5 years ago
parent commit 33baa55353
57 changed files with 2208 additions and 603 deletions
  1. builder/builder-next/adapters/containerimage/pull.go (+3 -3)
  2. builder/builder-next/worker/worker.go (+3 -3)
  3. vendor.conf (+2 -2)
  4. vendor/github.com/moby/buildkit/README.md (+24 -8)
  5. vendor/github.com/moby/buildkit/api/services/control/control.pb.go (+293 -117)
  6. vendor/github.com/moby/buildkit/api/services/control/control.proto (+1 -0)
  7. vendor/github.com/moby/buildkit/cache/manager.go (+1 -1)
  8. vendor/github.com/moby/buildkit/cache/refs.go (+4 -1)
  9. vendor/github.com/moby/buildkit/client/build.go (+5 -0)
  10. vendor/github.com/moby/buildkit/client/llb/definition.go (+161 -0)
  11. vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go (+1 -2)
  12. vendor/github.com/moby/buildkit/client/llb/resolver.go (+8 -2)
  13. vendor/github.com/moby/buildkit/client/llb/source.go (+1 -2)
  14. vendor/github.com/moby/buildkit/client/solve.go (+27 -13)
  15. vendor/github.com/moby/buildkit/control/control.go (+5 -4)
  16. vendor/github.com/moby/buildkit/control/gateway/gateway.go (+9 -0)
  17. vendor/github.com/moby/buildkit/executor/oci/spec_unix.go (+2 -2)
  18. vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go (+23 -3)
  19. vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go (+8 -26)
  20. vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go (+1 -1)
  21. vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go (+40 -0)
  22. vendor/github.com/moby/buildkit/frontend/frontend.go (+4 -2)
  23. vendor/github.com/moby/buildkit/frontend/gateway/client/client.go (+9 -11)
  24. vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go (+55 -17)
  25. vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go (+3 -2)
  26. vendor/github.com/moby/buildkit/frontend/gateway/gateway.go (+107 -38)
  27. vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go (+115 -25)
  28. vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go (+34 -0)
  29. vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go (+567 -74)
  30. vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto (+29 -3)
  31. vendor/github.com/moby/buildkit/frontend/result.go (+6 -4)
  32. vendor/github.com/moby/buildkit/go.mod (+8 -6)
  33. vendor/github.com/moby/buildkit/session/sshforward/ssh.go (+4 -0)
  34. vendor/github.com/moby/buildkit/solver/combinedcache.go (+3 -0)
  35. vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go (+144 -57)
  36. vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go (+70 -25)
  37. vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go (+2 -2)
  38. vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go (+2 -2)
  39. vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go (+1 -1)
  40. vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go (+6 -1)
  41. vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go (+52 -19)
  42. vendor/github.com/moby/buildkit/solver/llbsolver/solver.go (+35 -10)
  43. vendor/github.com/moby/buildkit/solver/pb/caps.go (+16 -2)
  44. vendor/github.com/moby/buildkit/solver/types.go (+64 -18)
  45. vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go (+11 -3)
  46. vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go (+163 -0)
  47. vendor/github.com/moby/buildkit/util/entitlements/security_linux.go (+0 -67)
  48. vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go (+3 -1)
  49. vendor/github.com/moby/buildkit/util/imageutil/config.go (+2 -1)
  50. vendor/github.com/moby/buildkit/worker/worker.go (+2 -2)
  51. vendor/github.com/tonistiigi/fsutil/copy/copy.go (+9 -5)
  52. vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go (+7 -5)
  53. vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go (+7 -5)
  54. vendor/github.com/tonistiigi/fsutil/copy/mkdir.go (+12 -3)
  55. vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go (+17 -0)
  56. vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go (+17 -0)
  57. vendor/github.com/tonistiigi/fsutil/go.mod (+0 -2)

+ 3 - 3
builder/builder-next/adapters/containerimage/pull.go

@@ -28,7 +28,7 @@ import (
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/reference"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/auth"
 	"github.com/moby/buildkit/session/auth"
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/source"
@@ -150,7 +150,7 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
 	return typed.dgst, typed.dt, nil
 	return typed.dgst, typed.dt, nil
 }
 }
 
 
-func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
+func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
 	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
 	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
 	if err != nil {
 	if err != nil {
 		return "", nil, err
 		return "", nil, err
@@ -304,7 +304,7 @@ func (p *puller) resolve(ctx context.Context) error {
 				_ = resolveProgressDone(err)
 				_ = resolveProgressDone(err)
 				return
 				return
 			}
 			}
-			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm)
+			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm)
 			if err != nil {
 			if err != nil {
 				p.resolveErr = err
 				p.resolveErr = err
 				_ = resolveProgressDone(err)
 				_ = resolveProgressDone(err)

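For reference, the hunks above move `ResolveImageConfigOpt` from `frontend/gateway/client` to `client/llb`. Below is a minimal, hypothetical call-site sketch written against that updated signature; the resolver value, platform, and hard-coded resolve mode are placeholders, not code from this commit.

```go
package example

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// imageConfigResolver mirrors the method signature that pull.go switches to
// in this commit: the option struct now comes from client/llb.
type imageConfigResolver interface {
	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
}

// resolveConfig is a hypothetical caller; the "default" resolve mode stands
// in for the resolveModeToString(...) value used by the real puller.
func resolveConfig(ctx context.Context, r imageConfigResolver, sm *session.Manager, ref string, platform specs.Platform) ([]byte, error) {
	_, dt, err := r.ResolveImageConfig(ctx, ref, llb.ResolveImageConfigOpt{
		Platform:    &platform,
		ResolveMode: "default",
	}, sm)
	return dt, err
}
```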
+ 3 - 3
builder/builder-next/worker/worker.go

@@ -23,12 +23,12 @@ import (
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/exporter"
 	localexporter "github.com/moby/buildkit/exporter/local"
 	localexporter "github.com/moby/buildkit/exporter/local"
 	tarexporter "github.com/moby/buildkit/exporter/tar"
 	tarexporter "github.com/moby/buildkit/exporter/tar"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver"
@@ -183,7 +183,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
 }
 }
 
 
 // ResolveImageConfig returns image config for an image
 // ResolveImageConfig returns image config for an image
-func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
+func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
 	// ImageSource is typically source/containerimage
 	// ImageSource is typically source/containerimage
 	resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
 	resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
 	if !ok {
 	if !ok {
@@ -477,7 +477,7 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
 }
 }
 
 
 type resolveImageConfig interface {
 type resolveImageConfig interface {
-	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
 }
 }
 
 
 type emptyProvider struct {
 type emptyProvider struct {

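The worker keeps `resolveImageConfig` as a private interface and discovers support at runtime with a type assertion on `w.ImageSource`. A small sketch of that pattern using the updated `llb`-based signature from the hunk above (the `worker` struct here is a stand-in, not the real builder-next type):

```go
package example

import (
	"context"
	"errors"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	digest "github.com/opencontainers/go-digest"
)

// resolveImageConfig mirrors the private interface in worker.go after this change.
type resolveImageConfig interface {
	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
}

// worker is a stand-in type; only the type-assertion pattern is illustrated.
type worker struct {
	ImageSource interface{}
}

func (w *worker) resolveImage(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
	// The image source only has to support config resolution if a caller
	// actually needs it, hence the runtime check instead of a hard dependency.
	src, ok := w.ImageSource.(resolveImageConfig)
	if !ok {
		return "", nil, errors.New("image source does not implement ResolveImageConfig")
	}
	return src.ResolveImageConfig(ctx, ref, opt, sm)
}
```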
+ 2 - 2
vendor.conf

@@ -26,8 +26,8 @@ github.com/imdario/mergo                            1afb36080aec31e0d1528973ebe6
 golang.org/x/sync                                   e225da77a7e68af35c70ccbf71af2b83e6acac3c
 
 # buildkit
-github.com/moby/buildkit                            4f4e03067523b2fc5ca2f17514a5e75ad63e02fb
-github.com/tonistiigi/fsutil                        0f039a052ca1da01626278199624b62aed9b3729
+github.com/moby/buildkit                            d75ed2b682485d4e4a35e46531230111424f8a65
+github.com/tonistiigi/fsutil                        6c909ab392c173a4264ae1bfcbc0450b9aac0c7d
 github.com/grpc-ecosystem/grpc-opentracing          8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go               1361b9cd60be79c4c3a7fa9841b3c132e40066a7
 github.com/google/shlex                             e7afc7fbc51079733e9468cdfd1efcd7d196cd1d

+ 24 - 8
vendor/github.com/moby/buildkit/README.md

@@ -45,7 +45,7 @@ You don't need to read this document unless you want to use the full-featured st
     - [Building a Dockerfile using external frontend:](#building-a-dockerfile-using-external-frontend)
     - [Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`](#building-a-dockerfile-with-experimental-features-like-run---mounttypebindcachetmpfssecretssh)
   - [Output](#output)
-    - [Registry](#registry)
+    - [Image/Registry](#imageregistry)
     - [Local directory](#local-directory)
     - [Docker tarball](#docker-tarball)
     - [OCI tarball](#oci-tarball)
@@ -85,6 +85,7 @@ BuildKit is used by the following projects:
 -   [Rio](https://github.com/rancher/rio)
 -   [PouchContainer](https://github.com/alibaba/pouch)
 -   [Docker buildx](https://github.com/docker/buildx)
+-   [Okteto Cloud](https://okteto.com/)
 
 ## Quick start
 
@@ -94,7 +95,7 @@ BuildKit is composed of the `buildkitd` daemon and the `buildctl` client.
 While the `buildctl` client is available for Linux, macOS, and Windows, the `buildkitd` daemon is only available for Linux currently.
 
 The `buildkitd` daemon requires the following components to be installed:
--   [runc](https://github.com/opencontainers/runc)
+-   [runc](https://github.com/opencontainers/runc) or [crun](https://github.com/containers/crun)
 -   [containerd](https://github.com/containerd/containerd) (if you want to use containerd worker)
 
 The latest binaries of BuildKit are available [here](https://github.com/moby/buildkit/releases) for Linux, macOS, and Windows.
@@ -125,6 +126,11 @@ We are open to adding more backends.
 The buildkitd daemon listens gRPC API on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets.
 See [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service).
 
+:information_source: Notice to Fedora 31 users:
+
+* As runc still does not work on cgroup v2 environment like Fedora 31, you need to substitute runc with crun. Run `rm -f $(which buildkit-runc) && ln -s $(which crun) /usr/local/bin/buildkit-runc` .
+* If you want to use runc, you need to configure the system to use cgroup v1. Run `sudo grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"` and reboot.
+
 ### Exploring LLB
 
 BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C.
@@ -193,7 +199,7 @@ See [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experi
 
 By default, the build result and intermediate cache will only remain internally in BuildKit. An output needs to be specified to retrieve the result.
 
-#### Registry
+#### Image/Registry
 
 ```bash
 buildctl build ... --output type=image,name=docker.io/username/image,push=true
@@ -209,6 +215,18 @@ buildctl build ...\
   --import-cache type=registry,ref=docker.io/username/image
 ```
 
+Keys supported by image output:
+* `name=[value]`: image name
+* `push=true`: push after creating the image
+* `push-by-digest=true`: push unnamed image
+* `registry.insecure=true`: push to insecure HTTP registry
+* `oci-mediatypes=true`: use OCI mediatypes in configuration JSON instead of Docker's
+* `unpack=true`: unpack image after creation (for use with containerd)
+* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
+* `name-canonical=true`: add additional canonical name `name@<digest>`
+* `compression=[uncompressed,gzip]`: choose compression type for layer, gzip is default value
+
+
 If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.
 `$DOCKER_CONFIG` defaults to `~/.docker`.
 
@@ -322,14 +340,11 @@ buildctl build ... \
 
 ```bash
 buildctl build ... --export-cache type=local,dest=path/to/output-dir
-buildctl build ... --import-cache type=local,src=path/to/input-dir,digest=sha256:deadbeef
+buildctl build ... --import-cache type=local,src=path/to/input-dir
 ```
 
 The directory layout conforms to OCI Image Spec v1.0.
 
-Currently, you need to specify the `digest` of the manifest list to import for `local` cache importer.
-This is planned to default to the digest of "latest" tag in `index.json` in future.
-
 #### `--export-cache` options
 -   `type`: `inline`, `registry`, or `local`
 -   `mode=min` (default): only export layers for the resulting image
@@ -341,7 +356,8 @@ This is planned to default to the digest of "latest" tag in `index.json` in futu
 -   `type`: `registry` or `local`. Use `registry` to import `inline` cache.
 -   `ref=docker.io/user/image:tag`: reference for `registry` cache importer
 -   `src=path/to/input-dir`: directory for `local` cache importer
--   `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer.
+-   `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer.
+    Defaults to the digest of "latest" tag in `index.json`
 
 ### Consistent hashing
 

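The README changes above document the image-output keys and drop the previously mandatory `digest` key for the `local` cache importer. As a rough Go-client counterpart (a sketch only: the registry name, cache path, and attribute choices are placeholders, and the `client` API shapes are assumptions about the library vendored here, not something this diff itself adds):

```go
package example

import (
	"context"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

// solveWithImageExport requests an image export using a couple of the keys
// documented above and imports a local cache directory without a digest.
func solveWithImageExport(ctx context.Context, c *client.Client, def *llb.Definition) error {
	_, err := c.Solve(ctx, def, client.SolveOpt{
		Exports: []client.ExportEntry{{
			Type: "image", // --output type=image,...
			Attrs: map[string]string{
				"name":        "docker.io/username/image",
				"push":        "true",
				"compression": "gzip", // documented default
			},
		}},
		CacheImports: []client.CacheOptionsEntry{{
			Type:  "local", // --import-cache type=local,src=...
			Attrs: map[string]string{"src": "path/to/input-dir"},
		}},
	}, nil)
	return err
}
```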
+ 293 - 117
vendor/github.com/moby/buildkit/api/services/control/control.pb.go

@@ -50,7 +50,7 @@ func (m *PruneRequest) Reset()         { *m = PruneRequest{} }
 func (m *PruneRequest) String() string { return proto.CompactTextString(m) }
 func (*PruneRequest) ProtoMessage()    {}
 func (*PruneRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{0}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{0}
 }
 func (m *PruneRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -118,7 +118,7 @@ func (m *DiskUsageRequest) Reset()         { *m = DiskUsageRequest{} }
 func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) }
 func (*DiskUsageRequest) ProtoMessage()    {}
 func (*DiskUsageRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{1}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{1}
 }
 func (m *DiskUsageRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -165,7 +165,7 @@ func (m *DiskUsageResponse) Reset()         { *m = DiskUsageResponse{} }
 func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) }
 func (*DiskUsageResponse) ProtoMessage()    {}
 func (*DiskUsageResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{2}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{2}
 }
 func (m *DiskUsageResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -222,7 +222,7 @@ func (m *UsageRecord) Reset()         { *m = UsageRecord{} }
 func (m *UsageRecord) String() string { return proto.CompactTextString(m) }
 func (*UsageRecord) ProtoMessage()    {}
 func (*UsageRecord) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{3}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{3}
 }
 func (m *UsageRecord) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -338,6 +338,7 @@ type SolveRequest struct {
 	FrontendAttrs        map[string]string                                        `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	Cache                CacheOptions                                             `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
 	Entitlements         []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
+	FrontendInputs       map[string]*pb.Definition                                `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	XXX_NoUnkeyedLiteral struct{}                                                 `json:"-"`
 	XXX_unrecognized     []byte                                                   `json:"-"`
 	XXX_sizecache        int32                                                    `json:"-"`
@@ -347,7 +348,7 @@ func (m *SolveRequest) Reset()         { *m = SolveRequest{} }
 func (m *SolveRequest) String() string { return proto.CompactTextString(m) }
 func (*SolveRequest) ProtoMessage()    {}
 func (*SolveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{4}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{4}
 }
 func (m *SolveRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -432,6 +433,13 @@ func (m *SolveRequest) GetCache() CacheOptions {
 	return CacheOptions{}
 }
 
+func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition {
+	if m != nil {
+		return m.FrontendInputs
+	}
+	return nil
+}
+
 type CacheOptions struct {
 	// ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0.
 	// When ExportRefDeprecated is set, the solver appends
@@ -459,7 +467,7 @@ func (m *CacheOptions) Reset()         { *m = CacheOptions{} }
 func (m *CacheOptions) String() string { return proto.CompactTextString(m) }
 func (*CacheOptions) ProtoMessage()    {}
 func (*CacheOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{5}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{5}
 }
 func (m *CacheOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -538,7 +546,7 @@ func (m *CacheOptionsEntry) Reset()         { *m = CacheOptionsEntry{} }
 func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) }
 func (*CacheOptionsEntry) ProtoMessage()    {}
 func (*CacheOptionsEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{6}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{6}
 }
 func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -592,7 +600,7 @@ func (m *SolveResponse) Reset()         { *m = SolveResponse{} }
 func (m *SolveResponse) String() string { return proto.CompactTextString(m) }
 func (*SolveResponse) ProtoMessage()    {}
 func (*SolveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{7}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{7}
 }
 func (m *SolveResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -639,7 +647,7 @@ func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
 func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
 func (*StatusRequest) ProtoMessage()    {}
 func (*StatusRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{8}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{8}
 }
 func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -688,7 +696,7 @@ func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
 func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
 func (*StatusResponse) ProtoMessage()    {}
 func (*StatusResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{9}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{9}
 }
 func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -755,7 +763,7 @@ func (m *Vertex) Reset()         { *m = Vertex{} }
 func (m *Vertex) String() string { return proto.CompactTextString(m) }
 func (*Vertex) ProtoMessage()    {}
 func (*Vertex) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{10}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{10}
 }
 func (m *Vertex) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -838,7 +846,7 @@ func (m *VertexStatus) Reset()         { *m = VertexStatus{} }
 func (m *VertexStatus) String() string { return proto.CompactTextString(m) }
 func (*VertexStatus) ProtoMessage()    {}
 func (*VertexStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{11}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{11}
 }
 func (m *VertexStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -930,7 +938,7 @@ func (m *VertexLog) Reset()         { *m = VertexLog{} }
 func (m *VertexLog) String() string { return proto.CompactTextString(m) }
 func (*VertexLog) ProtoMessage()    {}
 func (*VertexLog) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{12}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{12}
 }
 func (m *VertexLog) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -991,7 +999,7 @@ func (m *BytesMessage) Reset()         { *m = BytesMessage{} }
 func (m *BytesMessage) String() string { return proto.CompactTextString(m) }
 func (*BytesMessage) ProtoMessage()    {}
 func (*BytesMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{13}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{13}
 }
 func (m *BytesMessage) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1038,7 +1046,7 @@ func (m *ListWorkersRequest) Reset()         { *m = ListWorkersRequest{} }
 func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) }
 func (*ListWorkersRequest) ProtoMessage()    {}
 func (*ListWorkersRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{14}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{14}
 }
 func (m *ListWorkersRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1085,7 +1093,7 @@ func (m *ListWorkersResponse) Reset()         { *m = ListWorkersResponse{} }
 func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) }
 func (*ListWorkersResponse) ProtoMessage()    {}
 func (*ListWorkersResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_control_86d7f5d7b8f10de2, []int{15}
+	return fileDescriptor_control_7e741c2ad6bf4a8a, []int{15}
 }
 func (m *ListWorkersResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1129,6 +1137,7 @@ func init() {
 	proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry")
+	proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry")
 	proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry")
 	proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry")
@@ -1803,6 +1812,34 @@ func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if len(m.FrontendInputs) > 0 {
+		for k, _ := range m.FrontendInputs {
+			dAtA[i] = 0x52
+			i++
+			v := m.FrontendInputs[k]
+			msgSize := 0
+			if v != nil {
+				msgSize = v.Size()
+				msgSize += 1 + sovControl(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + msgSize
+			i = encodeVarintControl(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintControl(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			if v != nil {
+				dAtA[i] = 0x12
+				i++
+				i = encodeVarintControl(dAtA, i, uint64(v.Size()))
+				n5, err := v.MarshalTo(dAtA[i:])
+				if err != nil {
+					return 0, err
+				}
+				i += n5
+			}
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
@@ -2114,21 +2151,21 @@ func (m *Vertex) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x2a
 		i++
 		i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)))
-		n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
+		n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n5
+		i += n6
 	}
 	if m.Completed != nil {
 		dAtA[i] = 0x32
 		i++
 		i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)))
-		n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
+		n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n6
+		i += n7
 	}
 	if len(m.Error) > 0 {
 		dAtA[i] = 0x3a
@@ -2188,30 +2225,30 @@ func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0x32
 	i++
 	i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
-	n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n7
+	i += n8
 	if m.Started != nil {
 		dAtA[i] = 0x3a
 		i++
 		i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)))
-		n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
+		n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n8
+		i += n9
 	}
 	if m.Completed != nil {
 		dAtA[i] = 0x42
 		i++
 		i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)))
-		n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
+		n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n9
+		i += n10
 	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
@@ -2243,11 +2280,11 @@ func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
-	n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n10
+	i += n11
 	if m.Stream != 0 {
 		dAtA[i] = 0x18
 		i++
@@ -2532,6 +2569,19 @@ func (m *SolveRequest) Size() (n int) {
 			n += 1 + l + sovControl(uint64(l))
 		}
 	}
+	if len(m.FrontendInputs) > 0 {
+		for k, v := range m.FrontendInputs {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = v.Size()
+				l += 1 + sovControl(uint64(l))
+			}
+			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l
+			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -3942,6 +3992,129 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			}
 			m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FrontendInputs == nil {
+				m.FrontendInputs = make(map[string]*pb.Definition)
+			}
+			var mapkey string
+			var mapvalue *pb.Definition
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowControl
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowControl
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthControl
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowControl
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= (int(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthControl
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if mapmsglen < 0 {
+						return ErrInvalidLengthControl
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &pb.Definition{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipControl(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthControl
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.FrontendInputs[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipControl(dAtA[iNdEx:])
@@ -5878,93 +6051,96 @@ var (
 	ErrIntOverflowControl   = fmt.Errorf("proto: integer overflow")
 )
 
-func init() { proto.RegisterFile("control.proto", fileDescriptor_control_86d7f5d7b8f10de2) }
-
-var fileDescriptor_control_86d7f5d7b8f10de2 = []byte{
-	// 1359 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0x1b, 0xb7,
-	0x16, 0xce, 0x48, 0xd6, 0xeb, 0x48, 0x0e, 0x1c, 0x26, 0x37, 0x18, 0xcc, 0xc5, 0xb5, 0x75, 0x27,
-	0x2d, 0x20, 0x04, 0xc9, 0xc8, 0x71, 0x9b, 0x22, 0x35, 0xda, 0x22, 0x91, 0x95, 0x22, 0x0e, 0x62,
-	0x34, 0xa0, 0x93, 0x06, 0xe8, 0xa2, 0xc0, 0x48, 0xa2, 0x95, 0x81, 0x47, 0xc3, 0x29, 0xc9, 0x71,
-	0xa3, 0xfe, 0x80, 0xae, 0xfb, 0x2f, 0xba, 0xea, 0xaa, 0x8b, 0xfe, 0x82, 0x02, 0x59, 0x76, 0x9d,
-	0x85, 0x5b, 0x64, 0xdf, 0xae, 0xba, 0xe9, 0xae, 0xe0, 0x63, 0x64, 0xca, 0x92, 0xfc, 0xca, 0x6a,
-	0x78, 0x38, 0xe7, 0xfb, 0x78, 0x5e, 0x24, 0x0f, 0x61, 0xb9, 0x4f, 0x13, 0xc1, 0x68, 0x1c, 0xa4,
-	0x8c, 0x0a, 0x8a, 0x56, 0x46, 0xb4, 0x37, 0x0e, 0x7a, 0x59, 0x14, 0x0f, 0xf6, 0x23, 0x11, 0x1c,
-	0xdc, 0xf1, 0x6e, 0x0f, 0x23, 0xf1, 0x32, 0xeb, 0x05, 0x7d, 0x3a, 0x6a, 0x0f, 0xe9, 0x90, 0xb6,
-	0x95, 0x62, 0x2f, 0xdb, 0x53, 0x92, 0x12, 0xd4, 0x48, 0x13, 0x78, 0x6b, 0x43, 0x4a, 0x87, 0x31,
-	0x39, 0xd2, 0x12, 0xd1, 0x88, 0x70, 0x11, 0x8e, 0x52, 0xa3, 0x70, 0xcb, 0xe2, 0x93, 0x8b, 0xb5,
-	0xf3, 0xc5, 0xda, 0x9c, 0xc6, 0x07, 0x84, 0xb5, 0xd3, 0x5e, 0x9b, 0xa6, 0xdc, 0x68, 0xb7, 0x17,
-	0x6a, 0x87, 0x69, 0xd4, 0x16, 0xe3, 0x94, 0xf0, 0xf6, 0xb7, 0x94, 0xed, 0x13, 0xa6, 0x01, 0xfe,
-	0xf7, 0x0e, 0x34, 0x9e, 0xb2, 0x2c, 0x21, 0x98, 0x7c, 0x93, 0x11, 0x2e, 0xd0, 0x75, 0x28, 0xef,
-	0x45, 0xb1, 0x20, 0xcc, 0x75, 0x9a, 0xc5, 0x56, 0x0d, 0x1b, 0x09, 0xad, 0x40, 0x31, 0x8c, 0x63,
-	0xb7, 0xd0, 0x74, 0x5a, 0x55, 0x2c, 0x87, 0xa8, 0x05, 0x8d, 0x7d, 0x42, 0xd2, 0x6e, 0xc6, 0x42,
-	0x11, 0xd1, 0xc4, 0x2d, 0x36, 0x9d, 0x56, 0xb1, 0xb3, 0xf4, 0xfa, 0x70, 0xcd, 0xc1, 0x53, 0x7f,
-	0x90, 0x0f, 0x35, 0x29, 0x77, 0xc6, 0x82, 0x70, 0x77, 0xc9, 0x52, 0x3b, 0x9a, 0xf6, 0x6f, 0xc2,
-	0x4a, 0x37, 0xe2, 0xfb, 0xcf, 0x79, 0x38, 0x3c, 0xcd, 0x16, 0xff, 0x31, 0x5c, 0xb1, 0x74, 0x79,
-	0x4a, 0x13, 0x4e, 0xd0, 0x5d, 0x28, 0x33, 0xd2, 0xa7, 0x6c, 0xa0, 0x94, 0xeb, 0x1b, 0xff, 0x0b,
-	0x8e, 0xe7, 0x26, 0x30, 0x00, 0xa9, 0x84, 0x8d, 0xb2, 0xff, 0x4f, 0x01, 0xea, 0xd6, 0x3c, 0xba,
-	0x0c, 0x85, 0xed, 0xae, 0xeb, 0x34, 0x9d, 0x56, 0x0d, 0x17, 0xb6, 0xbb, 0xc8, 0x85, 0xca, 0x4e,
-	0x26, 0xc2, 0x5e, 0x4c, 0x8c, 0xef, 0xb9, 0x88, 0xae, 0x41, 0x69, 0x3b, 0x79, 0xce, 0x89, 0x72,
-	0xbc, 0x8a, 0xb5, 0x80, 0x10, 0x2c, 0xed, 0x46, 0xdf, 0x11, 0xed, 0x26, 0x56, 0x63, 0xe9, 0xc7,
-	0xd3, 0x90, 0x91, 0x44, 0xb8, 0x25, 0xc5, 0x6b, 0x24, 0xd4, 0x81, 0xda, 0x16, 0x23, 0xa1, 0x20,
-	0x83, 0x07, 0xc2, 0x2d, 0x37, 0x9d, 0x56, 0x7d, 0xc3, 0x0b, 0x74, 0x41, 0x04, 0x79, 0x41, 0x04,
-	0xcf, 0xf2, 0x82, 0xe8, 0x54, 0x5f, 0x1f, 0xae, 0x5d, 0xfa, 0xe1, 0x77, 0x19, 0xb7, 0x09, 0x0c,
-	0xdd, 0x07, 0x78, 0x12, 0x72, 0xf1, 0x9c, 0x2b, 0x92, 0xca, 0xa9, 0x24, 0x4b, 0x8a, 0xc0, 0xc2,
-	0xa0, 0x55, 0x00, 0x15, 0x80, 0x2d, 0x9a, 0x25, 0xc2, 0xad, 0x2a, 0xbb, 0xad, 0x19, 0xd4, 0x84,
-	0x7a, 0x97, 0xf0, 0x3e, 0x8b, 0x52, 0x95, 0xe6, 0x9a, 0x72, 0xc1, 0x9e, 0x92, 0x0c, 0x3a, 0x7a,
-	0xcf, 0xc6, 0x29, 0x71, 0x41, 0x29, 0x58, 0x33, 0xd2, 0xff, 0xdd, 0x97, 0x21, 0x23, 0x03, 0xb7,
-	0xae, 0x42, 0x65, 0x24, 0xff, 0xef, 0x25, 0x68, 0xec, 0xca, 0x2a, 0xce, 0x13, 0xbe, 0x02, 0x45,
-	0x4c, 0xf6, 0x4c, 0xf4, 0xe5, 0x10, 0x05, 0x00, 0x5d, 0xb2, 0x17, 0x25, 0x91, 0x5a, 0xbb, 0xa0,
-	0xdc, 0xbb, 0x1c, 0xa4, 0xbd, 0xe0, 0x68, 0x16, 0x5b, 0x1a, 0xc8, 0x83, 0xea, 0xc3, 0x57, 0x29,
-	0x65, 0xb2, 0x68, 0x8a, 0x8a, 0x66, 0x22, 0xa3, 0x17, 0xb0, 0x9c, 0x8f, 0x1f, 0x08, 0xc1, 0x64,
-	0x29, 0xca, 0x42, 0xb9, 0x33, 0x5b, 0x28, 0xb6, 0x51, 0xc1, 0x14, 0xe6, 0x61, 0x22, 0xd8, 0x18,
-	0x4f, 0xf3, 0xc8, 0x1a, 0xd9, 0x25, 0x9c, 0x4b, 0x0b, 0x75, 0x82, 0x73, 0x51, 0x9a, 0xf3, 0x39,
-	0xa3, 0x89, 0x20, 0xc9, 0x40, 0x25, 0xb8, 0x86, 0x27, 0xb2, 0x34, 0x27, 0x1f, 0x6b, 0x73, 0x2a,
-	0x67, 0x32, 0x67, 0x0a, 0x63, 0xcc, 0x99, 0x9a, 0x43, 0x9b, 0x50, 0xda, 0x0a, 0xfb, 0x2f, 0x89,
-	0xca, 0x65, 0x7d, 0x63, 0x75, 0x96, 0x50, 0xfd, 0xfe, 0x42, 0x25, 0x8f, 0xab, 0xad, 0x78, 0x09,
-	0x6b, 0x08, 0xfa, 0x1a, 0x1a, 0x0f, 0x13, 0x11, 0x89, 0x98, 0x8c, 0x48, 0x22, 0xb8, 0x5b, 0x93,
-	0x1b, 0xaf, 0xb3, 0xf9, 0xe6, 0x70, 0xed, 0xa3, 0x85, 0x47, 0x4b, 0x26, 0xa2, 0xb8, 0x4d, 0x2c,
-	0x54, 0x60, 0x51, 0xe0, 0x29, 0x3e, 0xef, 0x3e, 0xa0, 0xd9, 0x78, 0xca, 0xbc, 0xef, 0x93, 0x71,
-	0x9e, 0xf7, 0x7d, 0x32, 0x96, 0x9b, 0xeb, 0x20, 0x8c, 0x33, 0xbd, 0xe9, 0x6a, 0x58, 0x0b, 0x9b,
-	0x85, 0x7b, 0x8e, 0x64, 0x98, 0x0d, 0xc1, 0x79, 0x18, 0xfc, 0x9f, 0x8a, 0xd0, 0xb0, 0x23, 0x80,
-	0xd6, 0xe1, 0xaa, 0x36, 0x0a, 0x93, 0xbd, 0x2e, 0x49, 0x19, 0xe9, 0xcb, 0xcd, 0x65, 0xc8, 0xe6,
-	0xfd, 0x42, 0x1b, 0x70, 0x6d, 0x7b, 0x64, 0xa6, 0xb9, 0x05, 0x29, 0xa8, 0x73, 0x6a, 0xee, 0x3f,
-	0x44, 0xe1, 0x3f, 0x9a, 0x4a, 0x99, 0x6d, 0x81, 0x8a, 0x2a, 0xef, 0x1f, 0x9f, 0x9c, 0xa6, 0x60,
-	0x2e, 0x56, 0xe7, 0x7f, 0x3e, 0x2f, 0xfa, 0x14, 0x2a, 0xfa, 0x47, 0x5e, 0xe9, 0x37, 0x4e, 0x5e,
-	0x42, 0x93, 0xe5, 0x18, 0x09, 0xd7, 0x7e, 0x70, 0xb7, 0x74, 0x0e, 0xb8, 0xc1, 0x78, 0x8f, 0xc0,
-	0x5b, 0x6c, 0xf2, 0xb9, 0xf2, 0xf5, 0xa3, 0x03, 0x57, 0x66, 0x16, 0x92, 0x07, 0xad, 0x3a, 0x6e,
-	0x34, 0x85, 0x1a, 0xa3, 0x2e, 0x94, 0xf4, 0x56, 0x2a, 0x28, 0x83, 0x83, 0x33, 0x18, 0x1c, 0x58,
-	0xfb, 0x48, 0x83, 0xbd, 0x7b, 0x00, 0x17, 0xac, 0xac, 0x5f, 0x1c, 0x58, 0x36, 0x9b, 0xd5, 0xdc,
-	0x4a, 0x21, 0xac, 0xe4, 0xf5, 0x9e, 0xcf, 0x99, 0xfb, 0xe9, 0xee, 0xc2, 0x7d, 0xae, 0xd5, 0x82,
-	0xe3, 0x38, 0x6d, 0xe3, 0x0c, 0x9d, 0xb7, 0x95, 0xd7, 0xd5, 0x31, 0xd5, 0x73, 0x59, 0xfe, 0x7f,
-	0x58, 0xde, 0x15, 0xa1, 0xc8, 0xf8, 0xc2, 0xa3, 0xd8, 0xff, 0xd9, 0x81, 0xcb, 0xb9, 0x8e, 0xf1,
-	0xee, 0x43, 0xa8, 0x1e, 0x10, 0x26, 0xc8, 0x2b, 0xc2, 0x8d, 0x57, 0xee, 0xac, 0x57, 0x5f, 0x2a,
-	0x0d, 0x3c, 0xd1, 0x44, 0x9b, 0x50, 0xe5, 0x8a, 0x87, 0xe4, 0x89, 0x5a, 0x5d, 0x84, 0x32, 0xeb,
-	0x4d, 0xf4, 0x51, 0x1b, 0x96, 0x62, 0x3a, 0xe4, 0x66, 0xcf, 0xfc, 0x77, 0x11, 0xee, 0x09, 0x1d,
-	0x62, 0xa5, 0xe8, 0x1f, 0x16, 0xa0, 0xac, 0xe7, 0xd0, 0x63, 0x28, 0x0f, 0xa2, 0x21, 0xe1, 0x42,
-	0x7b, 0xd5, 0xd9, 0x90, 0x07, 0xdf, 0x9b, 0xc3, 0xb5, 0x9b, 0xd6, 0xc9, 0x46, 0x53, 0x92, 0xc8,
-	0x16, 0x2f, 0x8c, 0x12, 0xc2, 0x78, 0x7b, 0x48, 0x6f, 0x6b, 0x48, 0xd0, 0x55, 0x1f, 0x6c, 0x18,
-	0x24, 0x57, 0x94, 0xa4, 0x99, 0xd0, 0x1e, 0x5c, 0x90, 0x4b, 0x33, 0xc8, 0x4a, 0x4e, 0xc2, 0x11,
-	0x31, 0xf7, 0x95, 0x1a, 0xcb, 0x2b, 0xb3, 0x2f, 0x4b, 0x75, 0xa0, 0x1a, 0x89, 0x2a, 0x36, 0x12,
-	0xda, 0x84, 0x0a, 0x17, 0x21, 0x93, 0xc7, 0x46, 0xe9, 0x8c, 0x77, 0x7d, 0x0e, 0x40, 0x9f, 0x41,
-	0xad, 0x4f, 0x47, 0x69, 0x4c, 0x24, 0xba, 0x7c, 0x46, 0xf4, 0x11, 0x44, 0x56, 0x0f, 0x61, 0x8c,
-	0x32, 0xd5, 0x65, 0xd4, 0xb0, 0x16, 0xfc, 0xbf, 0x0a, 0xd0, 0xb0, 0x93, 0x35, 0xd3, 0x41, 0x3d,
-	0x86, 0xb2, 0x4e, 0xbd, 0xae, 0xba, 0x8b, 0x85, 0x4a, 0x33, 0xcc, 0x0d, 0x95, 0x0b, 0x95, 0x7e,
-	0xc6, 0x54, 0x7b, 0xa5, 0x9b, 0xae, 0x5c, 0x94, 0x06, 0x0b, 0x2a, 0xc2, 0x58, 0x85, 0xaa, 0x88,
-	0xb5, 0x20, 0xbb, 0xae, 0x49, 0x93, 0x7d, 0xbe, 0xae, 0x6b, 0x02, 0xb3, 0xd3, 0x50, 0x79, 0xa7,
-	0x34, 0x54, 0xcf, 0x9d, 0x06, 0xff, 0x57, 0x07, 0x6a, 0x93, 0x2a, 0xb7, 0xa2, 0xeb, 0xbc, 0x73,
-	0x74, 0xa7, 0x22, 0x53, 0xb8, 0x58, 0x64, 0xae, 0x43, 0x99, 0x0b, 0x46, 0xc2, 0x91, 0x7e, 0x0f,
-	0x60, 0x23, 0xc9, 0xf3, 0x64, 0xc4, 0x87, 0x2a, 0x43, 0x0d, 0x2c, 0x87, 0xbe, 0x0f, 0x0d, 0xd5,
-	0xfa, 0xef, 0x10, 0x2e, 0x9b, 0x4d, 0x99, 0xdb, 0x41, 0x28, 0x42, 0xe5, 0x47, 0x03, 0xab, 0xb1,
-	0x7f, 0x0b, 0xd0, 0x93, 0x88, 0x8b, 0x17, 0xea, 0xc9, 0xc2, 0x4f, 0x7b, 0x17, 0xec, 0xc2, 0xd5,
-	0x29, 0x6d, 0x73, 0x4a, 0x7d, 0x72, 0xec, 0x65, 0xf0, 0xde, 0xec, 0xa9, 0xa1, 0x5e, 0x46, 0x81,
-	0x06, 0x4e, 0x3f, 0x10, 0x36, 0xfe, 0x2c, 0x42, 0x65, 0x4b, 0x3f, 0xfa, 0xd0, 0x33, 0xa8, 0x4d,
-	0x1e, 0x1e, 0xc8, 0x9f, 0xa5, 0x39, 0xfe, 0x82, 0xf1, 0x6e, 0x9c, 0xa8, 0x63, 0xec, 0x7b, 0x04,
-	0x25, 0xf5, 0x04, 0x43, 0x73, 0x8e, 0x41, 0xfb, 0x6d, 0xe6, 0x9d, 0xfc, 0xa4, 0x59, 0x77, 0x24,
-	0x93, 0xba, 0x43, 0xe6, 0x31, 0xd9, 0x4d, 0xa4, 0xb7, 0x76, 0xca, 0xe5, 0x83, 0x76, 0xa0, 0x6c,
-	0xb6, 0xf3, 0x3c, 0x55, 0xfb, 0xa6, 0xf0, 0x9a, 0x8b, 0x15, 0x34, 0xd9, 0xba, 0x83, 0x76, 0x26,
-	0x1d, 0xf2, 0x3c, 0xd3, 0xec, 0x32, 0xf0, 0x4e, 0xf9, 0xdf, 0x72, 0xd6, 0x1d, 0xf4, 0x15, 0xd4,
-	0xad, 0x44, 0xa3, 0x39, 0x09, 0x9d, 0xad, 0x1a, 0xef, 0xfd, 0x53, 0xb4, 0xb4, 0xb1, 0x9d, 0xc6,
-	0xeb, 0xb7, 0xab, 0xce, 0x6f, 0x6f, 0x57, 0x9d, 0x3f, 0xde, 0xae, 0x3a, 0xbd, 0xb2, 0xaa, 0xfb,
-	0x0f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x7a, 0xbe, 0x54, 0xf8, 0x0f, 0x00, 0x00,
+func init() { proto.RegisterFile("control.proto", fileDescriptor_control_7e741c2ad6bf4a8a) }
+
+var fileDescriptor_control_7e741c2ad6bf4a8a = []byte{
+	// 1397 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5,
+	0x1b, 0xef, 0xda, 0xf1, 0xdb, 0x63, 0x27, 0x4a, 0xa7, 0xfd, 0x57, 0xab, 0xfd, 0x8b, 0xc4, 0x6c,
+	0x8b, 0x64, 0x55, 0xed, 0x3a, 0x35, 0x14, 0x95, 0x08, 0x50, 0xeb, 0xb8, 0xa8, 0xa9, 0x1a, 0x51,
+	0x36, 0x2d, 0x95, 0x7a, 0x40, 0x5a, 0xdb, 0x13, 0x77, 0x95, 0xf5, 0xce, 0x32, 0x33, 0x1b, 0x6a,
+	0x3e, 0x00, 0x67, 0xbe, 0x03, 0x07, 0x4e, 0x9c, 0x38, 0xf0, 0x09, 0x90, 0x7a, 0xe4, 0xdc, 0x43,
+	0x40, 0xb9, 0xc3, 0x9d, 0x1b, 0x9a, 0x97, 0x75, 0xc6, 0xb1, 0x9d, 0xc4, 0xe9, 0xc9, 0xf3, 0x8c,
+	0x9f, 0xdf, 0x6f, 0x9f, 0xd7, 0x99, 0x79, 0x60, 0xb9, 0x47, 0x62, 0x4e, 0x49, 0xe4, 0x25, 0x94,
+	0x70, 0x82, 0x56, 0x87, 0xa4, 0x3b, 0xf2, 0xba, 0x69, 0x18, 0xf5, 0xf7, 0x43, 0xee, 0x1d, 0xdc,
+	0x71, 0x6e, 0x0f, 0x42, 0xfe, 0x2a, 0xed, 0x7a, 0x3d, 0x32, 0x6c, 0x0e, 0xc8, 0x80, 0x34, 0xa5,
+	0x62, 0x37, 0xdd, 0x93, 0x92, 0x14, 0xe4, 0x4a, 0x11, 0x38, 0xeb, 0x03, 0x42, 0x06, 0x11, 0x3e,
+	0xd6, 0xe2, 0xe1, 0x10, 0x33, 0x1e, 0x0c, 0x13, 0xad, 0x70, 0xcb, 0xe0, 0x13, 0x1f, 0x6b, 0x66,
+	0x1f, 0x6b, 0x32, 0x12, 0x1d, 0x60, 0xda, 0x4c, 0xba, 0x4d, 0x92, 0x30, 0xad, 0xdd, 0x9c, 0xab,
+	0x1d, 0x24, 0x61, 0x93, 0x8f, 0x12, 0xcc, 0x9a, 0xdf, 0x11, 0xba, 0x8f, 0xa9, 0x02, 0xb8, 0x3f,
+	0x58, 0x50, 0x7b, 0x4a, 0xd3, 0x18, 0xfb, 0xf8, 0xdb, 0x14, 0x33, 0x8e, 0xae, 0x41, 0x71, 0x2f,
+	0x8c, 0x38, 0xa6, 0xb6, 0x55, 0xcf, 0x37, 0x2a, 0xbe, 0x96, 0xd0, 0x2a, 0xe4, 0x83, 0x28, 0xb2,
+	0x73, 0x75, 0xab, 0x51, 0xf6, 0xc5, 0x12, 0x35, 0xa0, 0xb6, 0x8f, 0x71, 0xd2, 0x49, 0x69, 0xc0,
+	0x43, 0x12, 0xdb, 0xf9, 0xba, 0xd5, 0xc8, 0xb7, 0x97, 0xde, 0x1c, 0xae, 0x5b, 0xfe, 0xc4, 0x3f,
+	0xc8, 0x85, 0x8a, 0x90, 0xdb, 0x23, 0x8e, 0x99, 0xbd, 0x64, 0xa8, 0x1d, 0x6f, 0xbb, 0x37, 0x61,
+	0xb5, 0x13, 0xb2, 0xfd, 0xe7, 0x2c, 0x18, 0x9c, 0x65, 0x8b, 0xfb, 0x18, 0x2e, 0x1b, 0xba, 0x2c,
+	0x21, 0x31, 0xc3, 0xe8, 0x2e, 0x14, 0x29, 0xee, 0x11, 0xda, 0x97, 0xca, 0xd5, 0xd6, 0x7b, 0xde,
+	0xc9, 0xdc, 0x78, 0x1a, 0x20, 0x94, 0x7c, 0xad, 0xec, 0xfe, 0x9b, 0x83, 0xaa, 0xb1, 0x8f, 0x56,
+	0x20, 0xb7, 0xdd, 0xb1, 0xad, 0xba, 0xd5, 0xa8, 0xf8, 0xb9, 0xed, 0x0e, 0xb2, 0xa1, 0xb4, 0x93,
+	0xf2, 0xa0, 0x1b, 0x61, 0xed, 0x7b, 0x26, 0xa2, 0xab, 0x50, 0xd8, 0x8e, 0x9f, 0x33, 0x2c, 0x1d,
+	0x2f, 0xfb, 0x4a, 0x40, 0x08, 0x96, 0x76, 0xc3, 0xef, 0xb1, 0x72, 0xd3, 0x97, 0x6b, 0xe1, 0xc7,
+	0xd3, 0x80, 0xe2, 0x98, 0xdb, 0x05, 0xc9, 0xab, 0x25, 0xd4, 0x86, 0xca, 0x16, 0xc5, 0x01, 0xc7,
+	0xfd, 0x07, 0xdc, 0x2e, 0xd6, 0xad, 0x46, 0xb5, 0xe5, 0x78, 0xaa, 0x20, 0xbc, 0xac, 0x20, 0xbc,
+	0x67, 0x59, 0x41, 0xb4, 0xcb, 0x6f, 0x0e, 0xd7, 0x2f, 0xfd, 0xf8, 0xa7, 0x88, 0xdb, 0x18, 0x86,
+	0xee, 0x03, 0x3c, 0x09, 0x18, 0x7f, 0xce, 0x24, 0x49, 0xe9, 0x4c, 0x92, 0x25, 0x49, 0x60, 0x60,
+	0xd0, 0x1a, 0x80, 0x0c, 0xc0, 0x16, 0x49, 0x63, 0x6e, 0x97, 0xa5, 0xdd, 0xc6, 0x0e, 0xaa, 0x43,
+	0xb5, 0x83, 0x59, 0x8f, 0x86, 0x89, 0x4c, 0x73, 0x45, 0xba, 0x60, 0x6e, 0x09, 0x06, 0x15, 0xbd,
+	0x67, 0xa3, 0x04, 0xdb, 0x20, 0x15, 0x8c, 0x1d, 0xe1, 0xff, 0xee, 0xab, 0x80, 0xe2, 0xbe, 0x5d,
+	0x95, 0xa1, 0xd2, 0x92, 0xfb, 0x53, 0x11, 0x6a, 0xbb, 0xa2, 0x8a, 0xb3, 0x84, 0xaf, 0x42, 0xde,
+	0xc7, 0x7b, 0x3a, 0xfa, 0x62, 0x89, 0x3c, 0x80, 0x0e, 0xde, 0x0b, 0xe3, 0x50, 0x7e, 0x3b, 0x27,
+	0xdd, 0x5b, 0xf1, 0x92, 0xae, 0x77, 0xbc, 0xeb, 0x1b, 0x1a, 0xc8, 0x81, 0xf2, 0xc3, 0xd7, 0x09,
+	0xa1, 0xa2, 0x68, 0xf2, 0x92, 0x66, 0x2c, 0xa3, 0x17, 0xb0, 0x9c, 0xad, 0x1f, 0x70, 0x4e, 0x45,
+	0x29, 0x8a, 0x42, 0xb9, 0x33, 0x5d, 0x28, 0xa6, 0x51, 0xde, 0x04, 0xe6, 0x61, 0xcc, 0xe9, 0xc8,
+	0x9f, 0xe4, 0x11, 0x35, 0xb2, 0x8b, 0x19, 0x13, 0x16, 0xaa, 0x04, 0x67, 0xa2, 0x30, 0xe7, 0x0b,
+	0x4a, 0x62, 0x8e, 0xe3, 0xbe, 0x4c, 0x70, 0xc5, 0x1f, 0xcb, 0xc2, 0x9c, 0x6c, 0xad, 0xcc, 0x29,
+	0x9d, 0xcb, 0x9c, 0x09, 0x8c, 0x36, 0x67, 0x62, 0x0f, 0x6d, 0x42, 0x61, 0x2b, 0xe8, 0xbd, 0xc2,
+	0x32, 0x97, 0xd5, 0xd6, 0xda, 0x34, 0xa1, 0xfc, 0xfb, 0x4b, 0x99, 0x3c, 0x26, 0x5b, 0xf1, 0x92,
+	0xaf, 0x20, 0xe8, 0x1b, 0xa8, 0x3d, 0x8c, 0x79, 0xc8, 0x23, 0x3c, 0xc4, 0x31, 0x67, 0x76, 0x45,
+	0x34, 0x5e, 0x7b, 0xf3, 0xed, 0xe1, 0xfa, 0xc7, 0x73, 0x8f, 0x96, 0x94, 0x87, 0x51, 0x13, 0x1b,
+	0x28, 0xcf, 0xa0, 0xf0, 0x27, 0xf8, 0xd0, 0x4b, 0x58, 0xc9, 0x8c, 0xdd, 0x8e, 0x93, 0x94, 0x33,
+	0x1b, 0xa4, 0xd7, 0xad, 0x73, 0x7a, 0xad, 0x40, 0xca, 0xed, 0x13, 0x4c, 0xce, 0x7d, 0x40, 0xd3,
+	0xb9, 0x12, 0x35, 0xb5, 0x8f, 0x47, 0x59, 0x4d, 0xed, 0xe3, 0x91, 0x68, 0xdc, 0x83, 0x20, 0x4a,
+	0x55, 0x43, 0x57, 0x7c, 0x25, 0x6c, 0xe6, 0xee, 0x59, 0x82, 0x61, 0x3a, 0xbc, 0x0b, 0x31, 0x7c,
+	0x05, 0x57, 0x66, 0x98, 0x3a, 0x83, 0xe2, 0x86, 0x49, 0x31, 0x5d, 0xd3, 0xc7, 0x94, 0xee, 0x2f,
+	0x79, 0xa8, 0x99, 0x09, 0x43, 0x1b, 0x70, 0x45, 0xf9, 0xe9, 0xe3, 0xbd, 0x0e, 0x4e, 0x28, 0xee,
+	0x89, 0xb3, 0x40, 0x93, 0xcf, 0xfa, 0x0b, 0xb5, 0xe0, 0xea, 0xf6, 0x50, 0x6f, 0x33, 0x03, 0x92,
+	0x93, 0xc7, 0xea, 0xcc, 0xff, 0x10, 0x81, 0xff, 0x29, 0x2a, 0x19, 0x09, 0x03, 0x94, 0x97, 0x09,
+	0xfb, 0xe4, 0xf4, 0xaa, 0xf2, 0x66, 0x62, 0x55, 0xde, 0x66, 0xf3, 0xa2, 0xcf, 0xa0, 0xa4, 0xfe,
+	0xc8, 0x1a, 0xf3, 0xfa, 0xe9, 0x9f, 0x50, 0x64, 0x19, 0x46, 0xc0, 0x95, 0x1f, 0xcc, 0x2e, 0x2c,
+	0x00, 0xd7, 0x18, 0xe7, 0x11, 0x38, 0xf3, 0x4d, 0x5e, 0xa4, 0x04, 0xdc, 0x9f, 0x2d, 0xb8, 0x3c,
+	0xf5, 0x21, 0x71, 0x2f, 0xc8, 0xd3, 0x51, 0x51, 0xc8, 0x35, 0xea, 0x40, 0x41, 0x75, 0x7e, 0x4e,
+	0x1a, 0xec, 0x9d, 0xc3, 0x60, 0xcf, 0x68, 0x7b, 0x05, 0x76, 0xee, 0x01, 0x5c, 0xac, 0x58, 0xdd,
+	0xdf, 0x2c, 0x58, 0xd6, 0x5d, 0xa6, 0x2f, 0xd1, 0x00, 0x56, 0xb3, 0x16, 0xca, 0xf6, 0xf4, 0x75,
+	0x7a, 0x77, 0x6e, 0x83, 0x2a, 0x35, 0xef, 0x24, 0x4e, 0xd9, 0x38, 0x45, 0xe7, 0x6c, 0x65, 0x75,
+	0x75, 0x42, 0x75, 0x21, 0xcb, 0xdf, 0x87, 0xe5, 0x5d, 0x1e, 0xf0, 0x94, 0xcd, 0xbd, 0x39, 0xdc,
+	0x5f, 0x2d, 0x58, 0xc9, 0x74, 0xb4, 0x77, 0x1f, 0x41, 0xf9, 0x00, 0x53, 0x8e, 0x5f, 0x63, 0xa6,
+	0xbd, 0xb2, 0xa7, 0xbd, 0xfa, 0x5a, 0x6a, 0xf8, 0x63, 0x4d, 0xb4, 0x09, 0x65, 0x26, 0x79, 0x70,
+	0x96, 0xa8, 0xb5, 0x79, 0x28, 0xfd, 0xbd, 0xb1, 0x3e, 0x6a, 0xc2, 0x52, 0x44, 0x06, 0x4c, 0xf7,
+	0xcc, 0xff, 0xe7, 0xe1, 0x9e, 0x90, 0x81, 0x2f, 0x15, 0xdd, 0xc3, 0x1c, 0x14, 0xd5, 0x1e, 0x7a,
+	0x0c, 0xc5, 0x7e, 0x38, 0xc0, 0x8c, 0x2b, 0xaf, 0xda, 0x2d, 0x71, 0x4e, 0xbf, 0x3d, 0x5c, 0xbf,
+	0x69, 0x1c, 0xc4, 0x24, 0xc1, 0xb1, 0x78, 0x91, 0x06, 0x61, 0x8c, 0x29, 0x6b, 0x0e, 0xc8, 0x6d,
+	0x05, 0xf1, 0x3a, 0xf2, 0xc7, 0xd7, 0x0c, 0x82, 0x2b, 0x54, 0xc7, 0xad, 0x6c, 0xf9, 0x8b, 0x71,
+	0x29, 0x06, 0x51, 0xc9, 0x71, 0x30, 0xc4, 0xfa, 0x7a, 0x95, 0x6b, 0x71, 0xc3, 0xf7, 0x44, 0xa9,
+	0xf6, 0xe5, 0xbb, 0xa7, 0xec, 0x6b, 0x09, 0x6d, 0x42, 0x89, 0xf1, 0x80, 0x8a, 0x63, 0xa3, 0x70,
+	0xce, 0xa7, 0x49, 0x06, 0x40, 0x9f, 0x43, 0xa5, 0x47, 0x86, 0x49, 0x84, 0x05, 0xba, 0x78, 0x4e,
+	0xf4, 0x31, 0x44, 0x54, 0x0f, 0xa6, 0x94, 0x50, 0xf9, 0x28, 0xaa, 0xf8, 0x4a, 0x70, 0xff, 0xc9,
+	0x41, 0xcd, 0x4c, 0xd6, 0xd4, 0x83, 0xef, 0x31, 0x14, 0x55, 0xea, 0x55, 0xd5, 0x5d, 0x2c, 0x54,
+	0x8a, 0x61, 0x66, 0xa8, 0x6c, 0x28, 0xf5, 0x52, 0x2a, 0x5f, 0x83, 0xea, 0x8d, 0x98, 0x89, 0xc2,
+	0x60, 0x4e, 0x78, 0x10, 0xc9, 0x50, 0xe5, 0x7d, 0x25, 0x88, 0x47, 0xe2, 0x78, 0x26, 0x58, 0xec,
+	0x91, 0x38, 0x86, 0x99, 0x69, 0x28, 0xbd, 0x53, 0x1a, 0xca, 0x0b, 0xa7, 0xc1, 0xfd, 0xdd, 0x82,
+	0xca, 0xb8, 0xca, 0x8d, 0xe8, 0x5a, 0xef, 0x1c, 0xdd, 0x89, 0xc8, 0xe4, 0x2e, 0x16, 0x99, 0x6b,
+	0x50, 0x64, 0x9c, 0xe2, 0x60, 0xa8, 0xc6, 0x17, 0x5f, 0x4b, 0xe2, 0x3c, 0x19, 0xb2, 0x81, 0xcc,
+	0x50, 0xcd, 0x17, 0x4b, 0xd7, 0x85, 0x9a, 0x9c, 0x54, 0x76, 0x30, 0x13, 0x6f, 0x63, 0x91, 0xdb,
+	0x7e, 0xc0, 0x03, 0xe9, 0x47, 0xcd, 0x97, 0x6b, 0xf7, 0x16, 0xa0, 0x27, 0x21, 0xe3, 0x2f, 0xe4,
+	0x84, 0xc5, 0xce, 0x1a, 0x63, 0x76, 0xe1, 0xca, 0x84, 0xb6, 0x3e, 0xa5, 0x3e, 0x3d, 0x31, 0xc8,
+	0xdc, 0x98, 0x3e, 0x35, 0xe4, 0x20, 0xe7, 0x29, 0xe0, 0xe4, 0x3c, 0xd3, 0xfa, 0x3b, 0x0f, 0xa5,
+	0x2d, 0x35, 0xa3, 0xa2, 0x67, 0x50, 0x19, 0xcf, 0x49, 0xc8, 0x9d, 0xa6, 0x39, 0x39, 0x70, 0x39,
+	0xd7, 0x4f, 0xd5, 0xd1, 0xf6, 0x3d, 0x82, 0x82, 0x9c, 0x18, 0xd1, 0x8c, 0x63, 0xd0, 0x1c, 0x25,
+	0x9d, 0xd3, 0x27, 0xb0, 0x0d, 0x4b, 0x30, 0xc9, 0x3b, 0x64, 0x16, 0x93, 0xf9, 0xfa, 0x73, 0xd6,
+	0xcf, 0xb8, 0x7c, 0xd0, 0x0e, 0x14, 0x75, 0x3b, 0xcf, 0x52, 0x35, 0x6f, 0x0a, 0xa7, 0x3e, 0x5f,
+	0x41, 0x91, 0x6d, 0x58, 0x68, 0x67, 0xfc, 0xa0, 0x9f, 0x65, 0x9a, 0x59, 0x06, 0xce, 0x19, 0xff,
+	0x37, 0xac, 0x0d, 0x0b, 0xbd, 0x84, 0xaa, 0x91, 0x68, 0x34, 0x23, 0xa1, 0xd3, 0x55, 0xe3, 0x7c,
+	0x70, 0x86, 0x96, 0x32, 0xb6, 0x5d, 0x7b, 0x73, 0xb4, 0x66, 0xfd, 0x71, 0xb4, 0x66, 0xfd, 0x75,
+	0xb4, 0x66, 0x75, 0x8b, 0xb2, 0xee, 0x3f, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x65, 0x7c,
+	0xd6, 0xa7, 0x10, 0x00, 0x00,
 }

+ 1 - 0
vendor/github.com/moby/buildkit/api/services/control/control.proto

@@ -63,6 +63,7 @@ message SolveRequest {
 	map<string, string> FrontendAttrs = 7;
 	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
 	repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
+	map<string, pb.Definition> FrontendInputs = 10;
 }

 message CacheOptions {

+ 1 - 1
vendor/github.com/moby/buildkit/cache/manager.go

@@ -111,7 +111,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor,
 	if parent != nil {
 		pInfo := parent.Info()
 		if pInfo.ChainID == "" || pInfo.BlobChainID == "" {
-			return nil, errors.Errorf("failed to get ref by blob on non-adressable parent")
+			return nil, errors.Errorf("failed to get ref by blob on non-addressable parent")
 		}
 		chainID = imagespecidentity.ChainID([]digest.Digest{pInfo.ChainID, chainID})
 		blobChainID = imagespecidentity.ChainID([]digest.Digest{pInfo.BlobChainID, blobChainID})

+ 4 - 1
vendor/github.com/moby/buildkit/cache/refs.go

@@ -254,7 +254,10 @@ func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mount
 func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
 	delete(cr.cm.records, cr.ID())
 	if cr.parent != nil {
-		if err := cr.parent.release(ctx); err != nil {
+		cr.parent.mu.Lock()
+		err := cr.parent.release(ctx)
+		cr.parent.mu.Unlock()
+		if err != nil {
 			return err
 		}
 	}

+ 5 - 0
vendor/github.com/moby/buildkit/client/build.go

@@ -103,3 +103,8 @@ func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.Retur
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.Return(ctx, in, opts...)
 }
+
+func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) {
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.Inputs(ctx, in, opts...)
+}

+ 161 - 0
vendor/github.com/moby/buildkit/client/llb/definition.go

@@ -0,0 +1,161 @@
+package llb
+
+import (
+	"github.com/moby/buildkit/solver/pb"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// DefinitionOp implements llb.Vertex using a marshalled definition.
+//
+// For example, after marshalling a LLB state and sending over the wire, the
+// LLB state can be reconstructed from the definition.
+type DefinitionOp struct {
+	MarshalCache
+	ops       map[digest.Digest]*pb.Op
+	defs      map[digest.Digest][]byte
+	metas     map[digest.Digest]pb.OpMetadata
+	platforms map[digest.Digest]*specs.Platform
+	dgst      digest.Digest
+	index     pb.OutputIndex
+}
+
+// NewDefinitionOp returns a new operation from a marshalled definition.
+func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
+	ops := make(map[digest.Digest]*pb.Op)
+	defs := make(map[digest.Digest][]byte)
+
+	var dgst digest.Digest
+	for _, dt := range def.Def {
+		var op pb.Op
+		if err := (&op).Unmarshal(dt); err != nil {
+			return nil, errors.Wrap(err, "failed to parse llb proto op")
+		}
+		dgst = digest.FromBytes(dt)
+		ops[dgst] = &op
+		defs[dgst] = dt
+	}
+
+	var index pb.OutputIndex
+	if dgst != "" {
+		index = ops[dgst].Inputs[0].Index
+		dgst = ops[dgst].Inputs[0].Digest
+	}
+
+	return &DefinitionOp{
+		ops:       ops,
+		defs:      defs,
+		metas:     def.Metadata,
+		platforms: make(map[digest.Digest]*specs.Platform),
+		dgst:      dgst,
+		index:     index,
+	}, nil
+}
+
+func (d *DefinitionOp) ToInput(c *Constraints) (*pb.Input, error) {
+	return d.Output().ToInput(c)
+}
+
+func (d *DefinitionOp) Vertex() Vertex {
+	return d
+}
+
+func (d *DefinitionOp) Validate() error {
+	// Scratch state has no digest, ops or metas.
+	if d.dgst == "" {
+		return nil
+	}
+
+	if len(d.ops) == 0 || len(d.defs) == 0 || len(d.metas) == 0 {
+		return errors.Errorf("invalid definition op with no ops %d %d", len(d.ops), len(d.metas))
+	}
+
+	_, ok := d.ops[d.dgst]
+	if !ok {
+		return errors.Errorf("invalid definition op with unknown op %q", d.dgst)
+	}
+
+	_, ok = d.defs[d.dgst]
+	if !ok {
+		return errors.Errorf("invalid definition op with unknown def %q", d.dgst)
+	}
+
+	_, ok = d.metas[d.dgst]
+	if !ok {
+		return errors.Errorf("invalid definition op with unknown metas %q", d.dgst)
+	}
+
+	// It is possible for d.index >= len(d.ops[d.dgst]) when depending on scratch
+	// images.
+	if d.index < 0 {
+		return errors.Errorf("invalid definition op with invalid index")
+	}
+
+	return nil
+}
+
+func (d *DefinitionOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
+	if d.dgst == "" {
+		return "", nil, nil, errors.Errorf("cannot marshal empty definition op")
+	}
+
+	if err := d.Validate(); err != nil {
+		return "", nil, nil, err
+	}
+
+	meta := d.metas[d.dgst]
+	return d.dgst, d.defs[d.dgst], &meta, nil
+
+}
+
+func (d *DefinitionOp) Output() Output {
+	if d.dgst == "" {
+		return nil
+	}
+
+	return &output{vertex: d, platform: d.platform(), getIndex: func() (pb.OutputIndex, error) {
+		return d.index, nil
+	}}
+}
+
+func (d *DefinitionOp) Inputs() []Output {
+	if d.dgst == "" {
+		return nil
+	}
+
+	var inputs []Output
+
+	op := d.ops[d.dgst]
+	for _, input := range op.Inputs {
+		vtx := &DefinitionOp{
+			ops:       d.ops,
+			defs:      d.defs,
+			metas:     d.metas,
+			platforms: d.platforms,
+			dgst:      input.Digest,
+			index:     input.Index,
+		}
+		inputs = append(inputs, &output{vertex: vtx, platform: d.platform(), getIndex: func() (pb.OutputIndex, error) {
+			return pb.OutputIndex(vtx.index), nil
+		}})
+	}
+
+	return inputs
+}
+
+func (d *DefinitionOp) platform() *specs.Platform {
+	platform, ok := d.platforms[d.dgst]
+	if ok {
+		return platform
+	}
+
+	op := d.ops[d.dgst]
+	if op.Platform != nil {
+		spec := op.Platform.Spec()
+		platform = &spec
+	}
+
+	d.platforms[d.dgst] = platform
+	return platform
+}

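The new DefinitionOp makes it possible to turn a marshalled LLB definition back into an llb.State that can be extended with further operations. A minimal illustrative sketch of that round trip (the package and function names here are hypothetical; only NewDefinitionOp and NewState come from the code above):

package example

import (
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/solver/pb"
)

// stateFromDefinition reconstructs an llb.State from a marshalled definition,
// e.g. one received over the wire from a frontend.
func stateFromDefinition(def *pb.Definition) (llb.State, error) {
	op, err := llb.NewDefinitionOp(def)
	if err != nil {
		return llb.State{}, err
	}
	return llb.NewState(op), nil
}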
+ 1 - 2
vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go

@@ -10,7 +10,6 @@ import (
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/docker/docker/pkg/locker"
 	"github.com/moby/buildkit/client/llb"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/imageutil"
 	digest "github.com/opencontainers/go-digest"
@@ -72,7 +71,7 @@ type resolveResult struct {
 	dgst   digest.Digest
 }

-func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
+func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
 	imr.locker.Lock(ref)
 	defer imr.locker.Unlock(ref)


+ 8 - 2
vendor/github.com/moby/buildkit/client/llb/resolver.go

@@ -3,8 +3,8 @@ package llb
 import (
 	"context"

-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )

 // WithMetaResolver adds a metadata resolver to an image
@@ -16,5 +16,11 @@ func WithMetaResolver(mr ImageMetaResolver) ImageOption {

 // ImageMetaResolver can resolve image config metadata from a reference
 type ImageMetaResolver interface {
-	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
+}
+
+type ResolveImageConfigOpt struct {
+	Platform    *specs.Platform
+	ResolveMode string
+	LogName     string
 }

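With ResolveImageConfigOpt now living in client/llb, a resolver can be attached to an image source without importing the gateway client. A rough sketch of the caller side (use of the imagemetaresolver package's Default helper is an assumption for illustration):

package example

import (
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/client/llb/imagemetaresolver"
)

// alpineWithMetadata builds an image state whose config metadata (env,
// entrypoint, etc.) is resolved through an ImageMetaResolver at marshal time.
func alpineWithMetadata() llb.State {
	return llb.Image("docker.io/library/alpine:latest",
		llb.WithMetaResolver(imagemetaresolver.Default()),
	)
}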
+ 1 - 2
vendor/github.com/moby/buildkit/client/llb/source.go

@@ -9,7 +9,6 @@ import (
 	"strings"

 	"github.com/docker/distribution/reference"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	digest "github.com/opencontainers/go-digest"
@@ -119,7 +118,7 @@ func Image(ref string, opts ...ImageOption) State {
 		src.err = err
 	}
 	if info.metaResolver != nil {
-		_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, gw.ResolveImageConfigOpt{
+		_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
 			Platform:    info.Constraints.Platform,
 			ResolveMode: info.resolveMode.String(),
 		})

+ 27 - 13
vendor/github.com/moby/buildkit/client/solve.go

@@ -35,6 +35,7 @@ type SolveOpt struct {
 	SharedKey             string
 	Frontend              string
 	FrontendAttrs         map[string]string
+	FrontendInputs        map[string]llb.State
 	CacheExports          []CacheOptionsEntry
 	CacheImports          []CacheOptionsEntry
 	Session               []session.Attachable
@@ -188,16 +189,27 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		if def != nil {
 			pbd = def.ToPB()
 		}
+
+		frontendInputs := make(map[string]*pb.Definition)
+		for key, st := range opt.FrontendInputs {
+			def, err := st.Marshal()
+			if err != nil {
+				return err
+			}
+			frontendInputs[key] = def.ToPB()
+		}
+
 		resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
-			Ref:           ref,
-			Definition:    pbd,
-			Exporter:      ex.Type,
-			ExporterAttrs: ex.Attrs,
-			Session:       s.ID(),
-			Frontend:      opt.Frontend,
-			FrontendAttrs: opt.FrontendAttrs,
-			Cache:         cacheOpt.options,
-			Entitlements:  opt.AllowedEntitlements,
+			Ref:            ref,
+			Definition:     pbd,
+			Exporter:       ex.Type,
+			ExporterAttrs:  ex.Attrs,
+			Session:        s.ID(),
+			Frontend:       opt.Frontend,
+			FrontendAttrs:  opt.FrontendAttrs,
+			FrontendInputs: frontendInputs,
+			Cache:          cacheOpt.options,
+			Entitlements:   opt.AllowedEntitlements,
 		})
 		if err != nil {
 			return errors.Wrap(err, "failed to solve")
@@ -412,15 +424,15 @@ func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) {
 			}
 			cs, err := contentlocal.NewStore(csDir)
 			if err != nil {
-				return nil, err
+				logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
+				continue
 			}
-			contentStores["local:"+csDir] = cs
-
 			// if digest is not specified, load from "latest" tag
 			if attrs["digest"] == "" {
 				idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
 				if err != nil {
-					return nil, err
+					logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
+					continue
 				}
 				for _, m := range idx.Manifests {
 					if m.Annotations[ocispec.AnnotationRefName] == "latest" {
@@ -432,6 +444,8 @@ func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) {
 					return nil, errors.New("local cache importer requires either explicit digest or \"latest\" tag on index.json")
 				}
 			}
+			contentStores["local:"+csDir] = cs
+
 		}
 		if im.Type == "registry" {
 			legacyImportRef := attrs["ref"]

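On the client side, the new SolveOpt.FrontendInputs field lets a caller hand pre-built LLB states to a frontend by name. A hedged sketch (the "context" and "dockerfile" keys are the conventional dockerfile-frontend input names and are assumed here, as is the helper function itself):

package example

import (
	"context"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

// solveWithInputs runs the dockerfile frontend against a git repository that
// is passed in as a named frontend input instead of a local directory.
func solveWithInputs(ctx context.Context, c *client.Client) error {
	_, err := c.Solve(ctx, nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendInputs: map[string]llb.State{
			"context":    llb.Git("https://github.com/moby/buildkit.git", "master"),
			"dockerfile": llb.Local("dockerfile"),
		},
	}, nil)
	return err
}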
+ 5 - 4
vendor/github.com/moby/buildkit/control/control.go

@@ -274,10 +274,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 	}

 	resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{
-		Frontend:     req.Frontend,
-		Definition:   req.Definition,
-		FrontendOpt:  req.FrontendAttrs,
-		CacheImports: cacheImports,
+		Frontend:       req.Frontend,
+		Definition:     req.Definition,
+		FrontendOpt:    req.FrontendAttrs,
+		FrontendInputs: req.FrontendInputs,
+		CacheImports:   cacheImports,
 	}, llbsolver.ExporterRequest{
 		Exporter:        expi,
 		CacheExporter:   cacheExporter,

+ 9 - 0
vendor/github.com/moby/buildkit/control/gateway/gateway.go

@@ -128,6 +128,15 @@ func (gwf *GatewayForwarder) Return(ctx context.Context, req *gwapi.ReturnReques
 	return res, err
 }

+func (gwf *GatewayForwarder) Inputs(ctx context.Context, req *gwapi.InputsRequest) (*gwapi.InputsResponse, error) {
+	fwd, err := gwf.lookupForwarder(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "forwarding Inputs")
+	}
+	res, err := fwd.Inputs(ctx, req)
+	return res, err
+}
+
 func (gwf *GatewayForwarder) ReadDir(ctx context.Context, req *gwapi.ReadDirRequest) (*gwapi.ReadDirResponse, error) {
 	fwd, err := gwf.lookupForwarder(ctx)
 	if err != nil {

+ 2 - 2
vendor/github.com/moby/buildkit/executor/oci/spec_unix.go

@@ -18,7 +18,7 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver/pb"
-	"github.com/moby/buildkit/util/entitlements"
+	"github.com/moby/buildkit/util/entitlements/security"
 	"github.com/moby/buildkit/util/network"
 	"github.com/moby/buildkit/util/system"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -38,7 +38,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 		ctx = namespaces.WithNamespace(ctx, "buildkit")
 	}
 	if meta.SecurityMode == pb.SecurityMode_INSECURE {
-		opts = append(opts, entitlements.WithInsecureSpec())
+		opts = append(opts, security.WithInsecureSpec())
 	} else if system.SeccompSupported() && meta.SecurityMode == pb.SecurityMode_SANDBOX {
 		opts = append(opts, seccomp.WithDefaultProfile())
 	}

+ 23 - 3
vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go

@@ -20,6 +20,7 @@ import (
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
 	"github.com/moby/buildkit/frontend/gateway/client"
+	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -58,6 +59,7 @@ var gitUrlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
 func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 	opts := c.BuildOpts().Opts
 	caps := c.BuildOpts().LLBCaps
+	gwcaps := c.BuildOpts().Caps

 	marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)}

@@ -129,7 +131,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 	fileop := useFileOp(opts, &caps)

 	var buildContext *llb.State
-	isScratchContext := false
+	isNotLocalContext := false
 	if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDir]); ok {
 		if !forceLocalDockerfile {
 			src = *st
@@ -191,7 +193,25 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 				src = httpContext
 			}
 			buildContext = &httpContext
-			isScratchContext = true
+			isNotLocalContext = true
+		}
+	} else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil {
+		inputs, err := c.Inputs(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get frontend inputs")
+		}
+
+		if !forceLocalDockerfile {
+			inputDockerfile, ok := inputs[DefaultLocalNameDockerfile]
+			if ok {
+				src = inputDockerfile
+			}
+		}
+
+		inputCtx, ok := inputs[DefaultLocalNameContext]
+		if ok {
+			buildContext = &inputCtx
+			isNotLocalContext = true
 		}
 	}

@@ -239,7 +259,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 		return nil
 	})
 	var excludes []string
-	if !isScratchContext {
+	if !isNotLocalContext {
 		eg.Go(func() error {
 			dockerignoreState := buildContext
 			if dockerignoreState == nil {

+ 8 - 26
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go

@@ -22,7 +22,6 @@ import (
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
 	"github.com/moby/buildkit/frontend/dockerfile/shell"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/buildkit/util/system"
@@ -240,7 +239,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 							prefix += platforms.Format(*platform) + " "
 						}
 						prefix += "internal]"
-						dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, gw.ResolveImageConfigOpt{
+						dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{
 							Platform:    platform,
 							ResolveMode: opt.ImageResolveMode.String(),
 							LogName:     fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
@@ -346,9 +345,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
 			opt.copyImage = DefaultCopyImage
 		}

-		if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil {
+		if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil {
 			return nil, nil, err
 		}
+		d.image.Config.OnBuild = nil

 		for _, cmd := range d.commands {
 			if err := dispatch(d, cmd, opt); err != nil {
@@ -587,7 +587,7 @@ type command struct {
 	sources []*dispatchState
 }

-func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error {
+func dispatchOnBuildTriggers(d *dispatchState, triggers []string, opt dispatchOpt) error {
 	for _, trigger := range triggers {
 		ast, err := parser.Parse(strings.NewReader(trigger))
 		if err != nil {
@@ -1214,31 +1214,13 @@ func normalizeContextPaths(paths map[string]struct{}) []string {
 		if p == "/" {
 			return nil
 		}
-		pathSlice = append(pathSlice, p)
+		pathSlice = append(pathSlice, path.Join(".", p))
 	}

-	toDelete := map[string]struct{}{}
-	for i := range pathSlice {
-		for j := range pathSlice {
-			if i == j {
-				continue
-			}
-			if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") {
-				delete(paths, pathSlice[j])
-			}
-		}
-	}
-
-	toSort := make([]string, 0, len(paths))
-	for p := range paths {
-		if _, ok := toDelete[p]; !ok {
-			toSort = append(toSort, path.Join(".", p))
-		}
-	}
-	sort.Slice(toSort, func(i, j int) bool {
-		return toSort[i] < toSort[j]
+	sort.Slice(pathSlice, func(i, j int) bool {
+		return pathSlice[i] < pathSlice[j]
 	})
-	return toSort
+	return pathSlice
 }

 func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {

+ 1 - 1
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go

@@ -297,7 +297,7 @@ func parseBuildStageName(args []string) (string, error) {
 	case len(args) == 3 && strings.EqualFold(args[1], "as"):
 		stageName = strings.ToLower(args[2])
 		if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
-			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
+			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2])
 		}
 	case len(args) != 1:
 		return "", errors.New("FROM requires either one or three arguments")

+ 40 - 0
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go

@@ -311,6 +311,26 @@ func (sw *shellWord) processDollar() (string, error) {
 			return fmt.Sprintf("${%s}", name), nil
 		}
 		return value, nil
+	case '?':
+		word, _, err := sw.processStopOn('}')
+		if err != nil {
+			if sw.scanner.Peek() == scanner.EOF {
+				return "", errors.New("syntax error: missing '}'")
+			}
+			return "", err
+		}
+		newValue, found := sw.getEnv(name)
+		if !found {
+			if sw.skipUnsetEnv {
+				return fmt.Sprintf("${%s?%s}", name, word), nil
+			}
+			message := "is not allowed to be unset"
+			if word != "" {
+				message = word
+			}
+			return "", errors.Errorf("%s: %s", name, message)
+		}
+		return newValue, nil
 	case ':':
 		// Special ${xx:...} format processing
 		// Yes it allows for recursive $'s in the ... spot
@@ -348,6 +368,26 @@ func (sw *shellWord) processDollar() (string, error) {

 			return newValue, nil

+		case '?':
+			if !found {
+				if sw.skipUnsetEnv {
+					return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
+				}
+				message := "is not allowed to be unset"
+				if word != "" {
+					message = word
+				}
+				return "", errors.Errorf("%s: %s", name, message)
+			}
+			if newValue == "" {
+				message := "is not allowed to be empty"
+				if word != "" {
+					message = word
+				}
+				return "", errors.Errorf("%s: %s", name, message)
+			}
+			return newValue, nil
+
 		default:
 			return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
 		}

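The lexer now understands the POSIX-style ${VAR?message} and ${VAR:?message} forms, failing expansion when the variable is unset (or empty, for the ':' variant). A small illustrative sketch against the shell package; the NewLex and ProcessWord signatures are assumed from this revision:

package example

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func requireVariable() {
	lex := shell.NewLex('\\')

	// FOO is not in the environment, so expansion fails with the custom message.
	_, err := lex.ProcessWord("${FOO?FOO must be set}", []string{})
	fmt.Println(err) // FOO: FOO must be set

	// With the variable present, the value is substituted as usual.
	out, _ := lex.ProcessWord("${FOO?FOO must be set}", []string{"FOO=bar"})
	fmt.Println(out) // bar
}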
+ 4 - 2
vendor/github.com/moby/buildkit/frontend/frontend.go

@@ -6,18 +6,20 @@ import (

 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/solver/pb"
 	digest "github.com/opencontainers/go-digest"
 )

 type Frontend interface {
-	Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (*Result, error)
+	Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition) (*Result, error)
 }

 type FrontendLLBBridge interface {
 	Solve(ctx context.Context, req SolveRequest) (*Result, error)
-	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
 	Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
 }


+ 9 - 11
vendor/github.com/moby/buildkit/frontend/gateway/client/client.go

@@ -3,6 +3,7 @@ package client
 import (
 	"context"

+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	digest "github.com/opencontainers/go-digest"
@@ -12,11 +13,13 @@ import (

 type Client interface {
 	Solve(ctx context.Context, req SolveRequest) (*Result, error)
-	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
 	BuildOpts() BuildOpts
+	Inputs(ctx context.Context) (map[string]llb.State, error)
 }

 type Reference interface {
+	ToState() (llb.State, error)
 	ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
 	StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error)
 	ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error)
@@ -43,10 +46,11 @@ type StatRequest struct {

 // SolveRequest is same as frontend.SolveRequest but avoiding dependency
 type SolveRequest struct {
-	Definition   *pb.Definition
-	Frontend     string
-	FrontendOpt  map[string]string
-	CacheImports []CacheOptionsEntry
+	Definition     *pb.Definition
+	Frontend       string
+	FrontendOpt    map[string]string
+	FrontendInputs map[string]*pb.Definition
+	CacheImports   []CacheOptionsEntry
 }

 type CacheOptionsEntry struct {
@@ -68,9 +72,3 @@ type BuildOpts struct {
 	LLBCaps   apicaps.CapSet
 	Caps      apicaps.CapSet
 }
-
-type ResolveImageConfigOpt struct {
-	Platform    *specs.Platform
-	ResolveMode string
-	LogName     string
-}

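For gateway frontends, the Client interface gains Inputs and references gain ToState, so a BuildFunc can consume caller-provided states directly. A rough sketch of such a frontend; the "context" input key, the function name, and the error handling are illustrative assumptions:

package example

import (
	"context"

	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/pkg/errors"
)

// buildFromInput solves whatever LLB state the caller passed as the "context"
// frontend input and returns the result unchanged.
func buildFromInput(ctx context.Context, c client.Client) (*client.Result, error) {
	inputs, err := c.Inputs(ctx)
	if err != nil {
		return nil, err
	}
	st, ok := inputs["context"]
	if !ok {
		return nil, errors.New("no 'context' input provided")
	}
	def, err := st.Marshal()
	if err != nil {
		return nil, err
	}
	return c.Solve(ctx, client.SolveRequest{Definition: def.ToPB()})
}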
+ 55 - 17
vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go

@@ -7,6 +7,7 @@ import (
 	"github.com/moby/buildkit/cache"
 	cacheutil "github.com/moby/buildkit/cache/util"
 	clienttypes "github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend/gateway/client"
 	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
@@ -19,9 +20,10 @@ import (
 	fstypes "github.com/tonistiigi/fsutil/types"
 )

-func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) {
+func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) {
 	return &bridgeClient{
 		opts:              opts,
+		inputs:            inputs,
 		FrontendLLBBridge: llbBridge,
 		sid:               session.FromContext(ctx),
 		workerInfos:       workerInfos,
@@ -33,6 +35,7 @@ type bridgeClient struct {
 	frontend.FrontendLLBBridge
 	mu           sync.Mutex
 	opts         map[string]string
+	inputs       map[string]*opspb.Definition
 	final        map[*ref]struct{}
 	sid          string
 	exporterAttr map[string][]byte
@@ -42,10 +45,11 @@ type bridgeClient struct {

 func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) {
 	res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
-		Definition:   req.Definition,
-		Frontend:     req.Frontend,
-		FrontendOpt:  req.FrontendOpt,
-		CacheImports: req.CacheImports,
+		Definition:     req.Definition,
+		Frontend:       req.Frontend,
+		FrontendOpt:    req.FrontendOpt,
+		FrontendInputs: req.FrontendInputs,
+		CacheImports:   req.CacheImports,
 	})
 	if err != nil {
 		return nil, err
@@ -54,12 +58,18 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli
 	cRes := &client.Result{}
 	c.mu.Lock()
 	for k, r := range res.Refs {
-		rr := &ref{r}
+		rr, err := newRef(r)
+		if err != nil {
+			return nil, err
+		}
 		c.refs = append(c.refs, rr)
 		cRes.AddRef(k, rr)
 	}
 	if r := res.Ref; r != nil {
-		rr := &ref{r}
+		rr, err := newRef(r)
+		if err != nil {
+			return nil, err
+		}
 		c.refs = append(c.refs, rr)
 		cRes.SetRef(rr)
 	}
@@ -88,6 +98,18 @@ func (c *bridgeClient) BuildOpts() client.BuildOpts {
 	}
 }

+func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
+	inputs := make(map[string]llb.State)
+	for key, def := range c.inputs {
+		defop, err := llb.NewDefinitionOp(def)
+		if err != nil {
+			return nil, err
+		}
+		inputs[key] = llb.NewState(defop)
+	}
+	return inputs, nil
+}
+
 func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) {
 	if r == nil {
 		return nil, nil
@@ -96,14 +118,14 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err
 	res := &frontend.Result{}

 	if r.Refs != nil {
-		res.Refs = make(map[string]solver.CachedResult, len(r.Refs))
+		res.Refs = make(map[string]solver.ResultProxy, len(r.Refs))
 		for k, r := range r.Refs {
 			rr, ok := r.(*ref)
 			if !ok {
 				return nil, errors.Errorf("invalid reference type for forward %T", r)
 			}
 			c.final[rr] = struct{}{}
-			res.Refs[k] = rr.CachedResult
+			res.Refs[k] = rr.ResultProxy
 		}
 	}
 	if r := r.Ref; r != nil {
@@ -112,7 +134,7 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err
 			return nil, errors.Errorf("invalid reference type for forward %T", r)
 		}
 		c.final[rr] = struct{}{}
-		res.Ref = rr.CachedResult
+		res.Ref = rr.ResultProxy
 	}
 	res.Metadata = r.Metadata

@@ -130,11 +152,23 @@ func (c *bridgeClient) discard(err error) {
 }

 type ref struct {
-	solver.CachedResult
+	solver.ResultProxy
+}
+
+func newRef(r solver.ResultProxy) (*ref, error) {
+	return &ref{ResultProxy: r}, nil
+}
+
+func (r *ref) ToState() (st llb.State, err error) {
+	defop, err := llb.NewDefinitionOp(r.Definition())
+	if err != nil {
+		return st, err
+	}
+	return llb.NewState(defop), nil
 }

 func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
-	ref, err := r.getImmutableRef()
+	ref, err := r.getImmutableRef(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -151,7 +185,7 @@ func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, err
 }

 func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
-	ref, err := r.getImmutableRef()
+	ref, err := r.getImmutableRef(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -163,17 +197,21 @@ func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstype
 }

 func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
-	ref, err := r.getImmutableRef()
+	ref, err := r.getImmutableRef(ctx)
 	if err != nil {
 		return nil, err
 	}
 	return cacheutil.StatFile(ctx, ref, req.Path)
 }

-func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
-	ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
+func (r *ref) getImmutableRef(ctx context.Context) (cache.ImmutableRef, error) {
+	rr, err := r.ResultProxy.Result(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ref, ok := rr.Sys().(*worker.WorkerRef)
 	if !ok {
-		return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
+		return nil, errors.Errorf("invalid ref: %T", rr.Sys())
 	}
 	return ref.ImmutableRef, nil
 }

+ 3 - 2
vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go

@@ -5,6 +5,7 @@ import (

 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/solver/pb"
 )

 func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend {
@@ -19,8 +20,8 @@ type GatewayForwarder struct {
 	f       client.BuildFunc
 }

-func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRes *frontend.Result, retErr error) {
-	c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, gf.workers.WorkerInfos())
+func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition) (retRes *frontend.Result, retErr error) {
+	c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos())
 	if err != nil {
 		return nil, err
 	}

+ 107 - 38
vendor/github.com/moby/buildkit/frontend/gateway/gateway.go

@@ -21,7 +21,6 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
@@ -66,7 +65,7 @@ func filterPrefix(opts map[string]string, pfx string) map[string]string {
 	return m
 }

-func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*frontend.Result, error) {
+func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition) (*frontend.Result, error) {
 	source, ok := opts[keySource]
 	if !ok {
 		return nil, errors.Errorf("no source specified for gateway")
@@ -82,23 +81,28 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 	if isDevel {
 		devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
 			frontend.SolveRequest{
-				Frontend:    source,
-				FrontendOpt: filterPrefix(opts, "gateway-"),
+				Frontend:       source,
+				FrontendOpt:    filterPrefix(opts, "gateway-"),
+				FrontendInputs: inputs,
 			})
 		if err != nil {
 			return nil, err
 		}
 		defer func() {
-			devRes.EachRef(func(ref solver.CachedResult) error {
+			devRes.EachRef(func(ref solver.ResultProxy) error {
 				return ref.Release(context.TODO())
 			})
 		}()
 		if devRes.Ref == nil {
 			return nil, errors.Errorf("development gateway didn't return default result")
 		}
-		workerRef, ok := devRes.Ref.Sys().(*worker.WorkerRef)
+		res, err := devRes.Ref.Result(ctx)
+		if err != nil {
+			return nil, err
+		}
+		workerRef, ok := res.Sys().(*worker.WorkerRef)
 		if !ok {
-			return nil, errors.Errorf("invalid ref: %T", devRes.Ref.Sys())
+			return nil, errors.Errorf("invalid ref: %T", res.Sys())
 		}
 		rootFS = workerRef.ImmutableRef
 		config, ok := devRes.Metadata[exptypes.ExporterImageConfigKey]
@@ -113,7 +117,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 			return nil, err
 		}

-		dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), gw.ResolveImageConfigOpt{})
+		dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{})
 		if err != nil {
 			return nil, err
 		}
@@ -143,7 +147,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 			return nil, err
 		}
 		defer func() {
-			res.EachRef(func(ref solver.CachedResult) error {
+			res.EachRef(func(ref solver.ResultProxy) error {
 				return ref.Release(context.TODO())
 			})
 		}()
@@ -151,14 +155,18 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 			return nil, errors.Errorf("gateway source didn't return default result")

 		}
-		workerRef, ok := res.Ref.Sys().(*worker.WorkerRef)
+		r, err := res.Ref.Result(ctx)
+		if err != nil {
+			return nil, err
+		}
+		workerRef, ok := r.Sys().(*worker.WorkerRef)
 		if !ok {
-			return nil, errors.Errorf("invalid ref: %T", res.Ref.Sys())
+			return nil, errors.Errorf("invalid ref: %T", r.Sys())
 		}
 		rootFS = workerRef.ImmutableRef
 	}

-	lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers)
+	lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs)
 	defer lbf.conn.Close()
 	if err != nil {
 		return nil, err
@@ -233,7 +241,7 @@ func (lbf *llbBridgeForwarder) Discard() {
 	for id, r := range lbf.refs {
 		if lbf.err == nil && lbf.result != nil {
 			keep := false
-			lbf.result.EachRef(func(r2 solver.CachedResult) error {
+			lbf.result.EachRef(func(r2 solver.ResultProxy) error {
 				if r == r2 {
 					keep = true
 				}
@@ -285,21 +293,22 @@ func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) {
 	return lbf.result, nil
 }

-func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) *llbBridgeForwarder {
+func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) *llbBridgeForwarder {
 	lbf := &llbBridgeForwarder{
 		callCtx:   ctx,
 		llbBridge: llbBridge,
-		refs:      map[string]solver.CachedResult{},
+		refs:      map[string]solver.ResultProxy{},
 		doneCh:    make(chan struct{}),
 		pipe:      newPipe(),
 		workers:   workers,
+		inputs:    inputs,
 	}
 	return lbf
 }

-func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, context.Context, error) {
+func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) (*llbBridgeForwarder, context.Context, error) {
 	ctx, cancel := context.WithCancel(ctx)
-	lbf := NewBridgeForwarder(ctx, llbBridge, workers)
+	lbf := NewBridgeForwarder(ctx, llbBridge, workers, inputs)
 	server := grpc.NewServer()
 	grpc_health_v1.RegisterHealthServer(server, health.NewServer())
 	pb.RegisterLLBBridgeServer(server, lbf)
@@ -380,7 +389,7 @@ type llbBridgeForwarder struct {
 	mu        sync.Mutex
 	callCtx   context.Context
 	llbBridge frontend.FrontendLLBBridge
-	refs      map[string]solver.CachedResult
+	refs      map[string]solver.ResultProxy
 	// lastRef      solver.CachedResult
 	// lastRefs     map[string]solver.CachedResult
 	// err          error
@@ -389,6 +398,7 @@ type llbBridgeForwarder struct {
 	err               error
 	exporterAttr      map[string][]byte
 	workers           frontend.WorkerInfos
+	inputs            map[string]*opspb.Definition
 	isErrServerClosed bool
 	*pipe
 }
@@ -405,7 +415,7 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R
 			OSFeatures:   p.OSFeatures,
 		}
 	}
-	dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, gw.ResolveImageConfigOpt{
+	dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{
 		Platform:    platform,
 		ResolveMode: req.ResolveMode,
 		LogName:     req.LogName,
@@ -444,12 +454,14 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest)
 			Attrs: e.Attrs,
 		})
 	}
+
 	ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
 	res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
-		Definition:   req.Definition,
-		Frontend:     req.Frontend,
-		FrontendOpt:  req.FrontendOpt,
-		CacheImports: cacheImports,
+		Definition:     req.Definition,
+		Frontend:       req.Frontend,
+		FrontendOpt:    req.FrontendOpt,
+		FrontendInputs: req.FrontendInputs,
+		CacheImports:   cacheImports,
 	})
 	if err != nil {
 		return nil, err
@@ -466,6 +478,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest)
 	lbf.mu.Lock()
 	if res.Refs != nil {
 		ids := make(map[string]string, len(res.Refs))
+		defs := make(map[string]*opspb.Definition, len(res.Refs))
 		for k, ref := range res.Refs {
 			id := identity.NewID()
 			if ref == nil {
@@ -474,17 +487,36 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest)
 				lbf.refs[id] = ref
 			}
 			ids[k] = id
+			defs[k] = ref.Definition()
+		}
+
+		if req.AllowResultArrayRef {
+			refMap := make(map[string]*pb.Ref, len(res.Refs))
+			for k, id := range ids {
+				refMap[k] = &pb.Ref{Id: id, Def: defs[k]}
+			}
+			pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: refMap}}
+		} else {
+			pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: ids}}
 		}
-		pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: ids}}
 	} else {
+		ref := res.Ref
 		id := identity.NewID()
-		if res.Ref == nil {
+
+		var def *opspb.Definition
+		if ref == nil {
 			id = ""
 		} else {
-			lbf.refs[id] = res.Ref
+			def = ref.Definition()
+			lbf.refs[id] = ref
 		}
 		defaultID = id
-		pbRes.Result = &pb.Result_Ref{Ref: id}
+
+		if req.AllowResultArrayRef {
+			pbRes.Result = &pb.Result_Ref{Ref: &pb.Ref{Id: id, Def: def}}
+		} else {
+			pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id}
+		}
 	}
 	lbf.mu.Unlock()

@@ -528,9 +560,13 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq
 	if ref == nil {
 		return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath)
 	}
-	workerRef, ok := ref.Sys().(*worker.WorkerRef)
+	r, err := ref.Result(ctx)
+	if err != nil {
+		return nil, err
+	}
+	workerRef, ok := r.Sys().(*worker.WorkerRef)
 	if !ok {
-		return nil, errors.Errorf("invalid ref: %T", ref.Sys())
+		return nil, errors.Errorf("invalid ref: %T", r.Sys())
 	}

 	newReq := cacheutil.ReadRequest{
@@ -562,9 +598,13 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque
 	if ref == nil {
 		return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.DirPath)
 	}
-	workerRef, ok := ref.Sys().(*worker.WorkerRef)
+	r, err := ref.Result(ctx)
+	if err != nil {
+		return nil, err
+	}
+	workerRef, ok := r.Sys().(*worker.WorkerRef)
 	if !ok {
-		return nil, errors.Errorf("invalid ref: %T", ref.Sys())
+		return nil, errors.Errorf("invalid ref: %T", r.Sys())
 	}

 	newReq := cacheutil.ReadDirRequest{
@@ -590,9 +630,13 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq
 	if ref == nil {
 		return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.Path)
 	}
-	workerRef, ok := ref.Sys().(*worker.WorkerRef)
+	r, err := ref.Result(ctx)
+	if err != nil {
+		return nil, err
+	}
+	workerRef, ok := r.Sys().(*worker.WorkerRef)
 	if !ok {
-		return nil, errors.Errorf("invalid ref: %T", ref.Sys())
+		return nil, errors.Errorf("invalid ref: %T", r.Sys())
 	}

 	st, err := cacheutil.StatFile(ctx, workerRef.ImmutableRef, req.Path)
@@ -635,16 +679,32 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest)
 		}

 		switch res := in.Result.Result.(type) {
+		case *pb.Result_RefDeprecated:
+			ref, err := lbf.convertRef(res.RefDeprecated)
+			if err != nil {
+				return nil, err
+			}
+			r.Ref = ref
+		case *pb.Result_RefsDeprecated:
+			m := map[string]solver.ResultProxy{}
+			for k, id := range res.RefsDeprecated.Refs {
+				ref, err := lbf.convertRef(id)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = ref
+			}
+			r.Refs = m
 		case *pb.Result_Ref:
-			ref, err := lbf.convertRef(res.Ref)
+			ref, err := lbf.convertRef(res.Ref.Id)
 			if err != nil {
 				return nil, err
 			}
 			r.Ref = ref
 		case *pb.Result_Refs:
-			m := map[string]solver.CachedResult{}
-			for k, v := range res.Refs.Refs {
-				ref, err := lbf.convertRef(v)
+			m := map[string]solver.ResultProxy{}
+			for k, ref := range res.Refs.Refs {
+				ref, err := lbf.convertRef(ref.Id)
 				if err != nil {
 				if err != nil {
 					return nil, err
 					return nil, err
 				}
 				}
@@ -656,16 +716,25 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest)
 	}
 	}
 }
 }
 
 
-func (lbf *llbBridgeForwarder) convertRef(id string) (solver.CachedResult, error) {
+func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) (*pb.InputsResponse, error) {
+	return &pb.InputsResponse{
+		Definitions: lbf.inputs,
+	}, nil
+}
+
+func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) {
 	if id == "" {
 	if id == "" {
 		return nil, nil
 		return nil, nil
 	}
 	}
+
 	lbf.mu.Lock()
 	lbf.mu.Lock()
 	defer lbf.mu.Unlock()
 	defer lbf.mu.Unlock()
+
 	r, ok := lbf.refs[id]
 	r, ok := lbf.refs[id]
 	if !ok {
 	if !ok {
 		return nil, errors.Errorf("return reference %s not found", id)
 		return nil, errors.Errorf("return reference %s not found", id)
 	}
 	}
+
 	return r, nil
 	return r, nil
 }
 }
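
Note: the ReadFile, ReadDir and StatFile handlers above now resolve their references through solver.ResultProxy, so the underlying worker reference is only materialized when Result(ctx) is first called. A minimal sketch of that access pattern, using only the types that appear in this diff:

package forwarderexample

import (
	"context"

	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/worker"
	"github.com/pkg/errors"
)

// immutableWorkerRef evaluates a lazy ResultProxy and extracts the worker
// reference that the cacheutil helpers (ReadFile, ReadDir, StatFile) use.
func immutableWorkerRef(ctx context.Context, ref solver.ResultProxy) (*worker.WorkerRef, error) {
	if ref == nil {
		return nil, errors.New("no reference")
	}
	// Result triggers the actual build on first use; later calls reuse the
	// memoized value.
	r, err := ref.Result(ctx)
	if err != nil {
		return nil, err
	}
	workerRef, ok := r.Sys().(*worker.WorkerRef)
	if !ok {
		return nil, errors.Errorf("invalid ref: %T", r.Sys())
	}
	return workerRef, nil
}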
 
 

+ 115 - 25
vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go

@@ -10,6 +10,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/gogo/googleapis/google/rpc"
 	"github.com/gogo/googleapis/google/rpc"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/frontend/gateway/client"
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	opspb "github.com/moby/buildkit/solver/pb"
 	opspb "github.com/moby/buildkit/solver/pb"
@@ -68,15 +69,15 @@ func current() (GrpcClient, error) {
 	return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers())
 	return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers())
 }
 }
 
 
-func convertRef(ref client.Reference) (string, error) {
+func convertRef(ref client.Reference) (*pb.Ref, error) {
 	if ref == nil {
 	if ref == nil {
-		return "", nil
+		return &pb.Ref{}, nil
 	}
 	}
 	r, ok := ref.(*reference)
 	r, ok := ref.(*reference)
 	if !ok {
 	if !ok {
-		return "", errors.Errorf("invalid return reference type %T", ref)
+		return nil, errors.Errorf("invalid return reference type %T", ref)
 	}
 	}
-	return r.id, nil
+	return &pb.Ref{Id: r.id, Def: r.def}, nil
 }
 }
 
 
 func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error {
 func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error {
@@ -105,22 +106,43 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
 					Metadata: res.Metadata,
 					Metadata: res.Metadata,
 				}
 				}
 				if res.Refs != nil {
 				if res.Refs != nil {
-					m := map[string]string{}
-					for k, r := range res.Refs {
-						id, err := convertRef(r)
-						if err != nil {
-							retError = err
-							continue
+					if c.caps.Supports(pb.CapProtoRefArray) == nil {
+						m := map[string]*pb.Ref{}
+						for k, r := range res.Refs {
+							pbRef, err := convertRef(r)
+							if err != nil {
+								retError = err
+								continue
+							}
+							m[k] = pbRef
 						}
 						}
-						m[k] = id
+						pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}}
+					} else {
+						// Server doesn't support the new wire format for refs, so we construct
+						// a deprecated result ref map.
+						m := map[string]string{}
+						for k, r := range res.Refs {
+							pbRef, err := convertRef(r)
+							if err != nil {
+								retError = err
+								continue
+							}
+							m[k] = pbRef.Id
+						}
+						pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}}
 					}
 					}
-					pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}}
 				} else {
 				} else {
-					id, err := convertRef(res.Ref)
+					pbRef, err := convertRef(res.Ref)
 					if err != nil {
 					if err != nil {
 						retError = err
 						retError = err
 					} else {
 					} else {
-						pbRes.Result = &pb.Result_Ref{Ref: id}
+						if c.caps.Supports(pb.CapProtoRefArray) == nil {
+							pbRes.Result = &pb.Result_Ref{Ref: pbRef}
+						} else {
+							// Server doesn't support the new wire format for refs, so we construct
+							// a deprecated result ref.
+							pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: pbRef.Id}
+						}
 					}
 					}
 				}
 				}
 				if retError == nil {
 				if retError == nil {
@@ -280,10 +302,12 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*clie
 	}
 	}
 
 
 	req := &pb.SolveRequest{
 	req := &pb.SolveRequest{
-		Definition:        creq.Definition,
-		Frontend:          creq.Frontend,
-		FrontendOpt:       creq.FrontendOpt,
-		AllowResultReturn: true,
+		Definition:          creq.Definition,
+		Frontend:            creq.Frontend,
+		FrontendOpt:         creq.FrontendOpt,
+		FrontendInputs:      creq.FrontendInputs,
+		AllowResultReturn:   true,
+		AllowResultArrayRef: true,
 		// old API
 		// old API
 		ImportCacheRefsDeprecated: legacyRegistryCacheImports,
 		ImportCacheRefsDeprecated: legacyRegistryCacheImports,
 		// new API
 		// new API
@@ -310,25 +334,44 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*clie
 	} else {
 	} else {
 		res.Metadata = resp.Result.Metadata
 		res.Metadata = resp.Result.Metadata
 		switch pbRes := resp.Result.Result.(type) {
 		switch pbRes := resp.Result.Result.(type) {
-		case *pb.Result_Ref:
-			if id := pbRes.Ref; id != "" {
+		case *pb.Result_RefDeprecated:
+			if id := pbRes.RefDeprecated; id != "" {
 				res.SetRef(&reference{id: id, c: c})
 				res.SetRef(&reference{id: id, c: c})
 			}
 			}
-		case *pb.Result_Refs:
-			for k, v := range pbRes.Refs.Refs {
+		case *pb.Result_RefsDeprecated:
+			for k, v := range pbRes.RefsDeprecated.Refs {
 				ref := &reference{id: v, c: c}
 				ref := &reference{id: v, c: c}
 				if v == "" {
 				if v == "" {
 					ref = nil
 					ref = nil
 				}
 				}
 				res.AddRef(k, ref)
 				res.AddRef(k, ref)
 			}
 			}
+		case *pb.Result_Ref:
+			if pbRes.Ref.Id != "" {
+				ref, err := newReference(c, pbRes.Ref)
+				if err != nil {
+					return nil, err
+				}
+				res.SetRef(ref)
+			}
+		case *pb.Result_Refs:
+			for k, v := range pbRes.Refs.Refs {
+				var ref *reference
+				if v.Id != "" {
+					ref, err = newReference(c, v)
+					if err != nil {
+						return nil, err
+					}
+				}
+				res.AddRef(k, ref)
+			}
 		}
 		}
 	}
 	}
 
 
 	return res, nil
 	return res, nil
 }
 }
 
 
-func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt client.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
+func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
 	var p *opspb.Platform
 	var p *opspb.Platform
 	if platform := opt.Platform; platform != nil {
 	if platform := opt.Platform; platform != nil {
 		p = &opspb.Platform{
 		p = &opspb.Platform{
@@ -357,9 +400,56 @@ func (c *grpcClient) BuildOpts() client.BuildOpts {
 	}
 	}
 }
 }
 
 
+func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
+	err := c.caps.Supports(pb.CapFrontendInputs)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.client.Inputs(ctx, &pb.InputsRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	inputs := make(map[string]llb.State)
+	for key, def := range resp.Definitions {
+		op, err := llb.NewDefinitionOp(def)
+		if err != nil {
+			return nil, err
+		}
+		inputs[key] = llb.NewState(op)
+	}
+	return inputs, nil
+
+}
+
 type reference struct {
 type reference struct {
-	id string
-	c  *grpcClient
+	c      *grpcClient
+	id     string
+	def    *opspb.Definition
+	output llb.Output
+}
+
+func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) {
+	return &reference{c: c, id: ref.Id, def: ref.Def}, nil
+}
+
+func (r *reference) ToState() (st llb.State, err error) {
+	err = r.c.caps.Supports(pb.CapReferenceOutput)
+	if err != nil {
+		return st, err
+	}
+
+	if r.def == nil {
+		return st, errors.Errorf("gateway did not return reference with definition")
+	}
+
+	defop, err := llb.NewDefinitionOp(r.def)
+	if err != nil {
+		return st, err
+	}
+
+	return llb.NewState(defop), nil
 }
 }
 
 
 func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
 func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
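
Note: with ToState above, a frontend can continue building on top of a solved reference instead of treating it as opaque. A hedged sketch of that flow, assuming the gateway client interface exposes Solve/SingleRef/ToState as wired up in this update; the extra command is an arbitrary example, and Marshal in this vendored version takes no context argument:

package frontendexample

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/gateway/client"
)

// Build solves a definition, converts the returned reference back into an
// llb.State via ToState, appends one more command, and solves again.
func Build(ctx context.Context, c client.Client, def *llb.Definition) (*client.Result, error) {
	res, err := c.Solve(ctx, client.SolveRequest{Definition: def.ToPB()})
	if err != nil {
		return nil, err
	}
	ref, err := res.SingleRef()
	if err != nil {
		return nil, err
	}
	// Requires CapReferenceOutput on the server; ToState returns an error
	// when the capability or the attached definition is missing.
	st, err := ref.ToState()
	if err != nil {
		return nil, err
	}
	st = st.Run(llb.Shlex("touch /built")).Root() // arbitrary example command

	def2, err := st.Marshal()
	if err != nil {
		return nil, err
	}
	return c.Solve(ctx, client.SolveRequest{Definition: def2.ToPB()})
}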

+ 34 - 0
vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go

@@ -19,6 +19,19 @@ const (
 	CapReadDir                 apicaps.CapID = "readdir"
 	CapReadDir                 apicaps.CapID = "readdir"
 	CapStatFile                apicaps.CapID = "statfile"
 	CapStatFile                apicaps.CapID = "statfile"
 	CapImportCaches            apicaps.CapID = "importcaches"
 	CapImportCaches            apicaps.CapID = "importcaches"
+
+	// CapProtoRefArray is a capability to return arrays of refs instead of single
+	// refs. This capability is only for the wire format change and shouldn't be
+	// used in frontends for feature detection.
+	CapProtoRefArray apicaps.CapID = "proto.refarray"
+
+	// CapReferenceOutput is a capability to use a reference of a solved result as
+	// an llb.Output.
+	CapReferenceOutput apicaps.CapID = "reference.output"
+
+	// CapFrontendInputs is a capability to request frontend inputs from the
+	// LLBBridge GRPC server.
+	CapFrontendInputs apicaps.CapID = "frontend.inputs"
 )
 )
 
 
 func init() {
 func init() {
@@ -92,4 +105,25 @@ func init() {
 		Enabled: true,
 		Enabled: true,
 		Status:  apicaps.CapStatusExperimental,
 		Status:  apicaps.CapStatusExperimental,
 	})
 	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapProtoRefArray,
+		Name:    "wire format ref arrays",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapReferenceOutput,
+		Name:    "reference output",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapFrontendInputs,
+		Name:    "frontend inputs",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
 }
 }
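
Note: frontends are expected to gate these features on the negotiated capability set rather than assume a new server. A rough sketch, assuming the cap set is exposed through BuildOpts() and that the client interface gains Inputs as part of this update:

package capexample

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/gateway/client"
	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
)

// namedInputs returns the frontend inputs when the server advertises
// frontend.inputs, and an empty map when talking to an older gateway.
func namedInputs(ctx context.Context, c client.Client) (map[string]llb.State, error) {
	if err := c.BuildOpts().Caps.Supports(gwpb.CapFrontendInputs); err != nil {
		return map[string]llb.State{}, nil // older gateway: no named inputs
	}
	return c.Inputs(ctx)
}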

File diff suppressed because it is too large
+ 567 - 74
vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go


+ 29 - 3
vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto

@@ -26,20 +26,35 @@ service LLBBridge {
 	rpc StatFile(StatFileRequest) returns (StatFileResponse);
 	rpc StatFile(StatFileRequest) returns (StatFileResponse);
 	rpc Ping(PingRequest) returns (PongResponse);
 	rpc Ping(PingRequest) returns (PongResponse);
 	rpc Return(ReturnRequest) returns (ReturnResponse);
 	rpc Return(ReturnRequest) returns (ReturnResponse);
+	// apicaps:CapFrontendInputs
+	rpc Inputs(InputsRequest) returns (InputsResponse);
 }
 }
 
 
 message Result {
 message Result {
 	oneof result {
 	oneof result {
-		string ref = 1;
-		RefMap refs = 2;
+    		// Deprecated non-array refs.
+		string refDeprecated = 1;
+		RefMapDeprecated refsDeprecated = 2;
+
+		Ref ref = 3;
+		RefMap refs = 4;
 	}
 	}
 	map<string, bytes> metadata = 10;
 	map<string, bytes> metadata = 10;
 }
 }
 
 
-message RefMap {
+message RefMapDeprecated {
 	map<string, string> refs = 1;
 	map<string, string> refs = 1;
 }
 }
 
 
+message Ref {
+	string id = 1;
+	pb.Definition def = 2;
+}
+
+message RefMap {
+	map<string, Ref> refs = 1;
+}
+
 message ReturnRequest {
 message ReturnRequest {
 	Result result = 1;
 	Result result = 1;
 	google.rpc.Status error = 2;
 	google.rpc.Status error = 2;
@@ -48,6 +63,13 @@ message ReturnRequest {
 message ReturnResponse {
 message ReturnResponse {
 }
 }
 
 
+message InputsRequest {
+}
+
+message InputsResponse {
+    map<string, pb.Definition> Definitions = 1;
+}
+
 message ResolveImageConfigRequest {
 message ResolveImageConfigRequest {
 	string Ref = 1;
 	string Ref = 1;
 	pb.Platform Platform = 2;
 	pb.Platform Platform = 2;
@@ -70,6 +92,7 @@ message SolveRequest {
         // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
         // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
 	repeated string ImportCacheRefsDeprecated = 4;
 	repeated string ImportCacheRefsDeprecated = 4;
 	bool allowResultReturn = 5;
 	bool allowResultReturn = 5;
+	bool allowResultArrayRef = 6;
 	
 	
 	// apicaps.CapSolveInlineReturn deprecated
 	// apicaps.CapSolveInlineReturn deprecated
 	bool Final = 10;
 	bool Final = 10;
@@ -77,6 +100,9 @@ message SolveRequest {
 	// CacheImports was added in BuildKit v0.4.0.
 	// CacheImports was added in BuildKit v0.4.0.
 	// apicaps:CapImportCaches
 	// apicaps:CapImportCaches
 	repeated CacheOptionsEntry CacheImports = 12;
 	repeated CacheOptionsEntry CacheImports = 12;
+
+	// apicaps:CapFrontendInputs
+	map<string, pb.Definition> FrontendInputs = 13;
 }
 }
 
 
 // CacheOptionsEntry corresponds to the control.CacheOptionsEntry
 // CacheOptionsEntry corresponds to the control.CacheOptionsEntry
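
Note: in Go, the generated types keep the deprecated string encoding and the new array-capable encoding side by side. A small sketch of producing both forms of a single-ref result, mirroring what the forwarder and grpc client above do:

package wireexample

import (
	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
	opspb "github.com/moby/buildkit/solver/pb"
)

// encodeSingleRef builds the result message for one reference: the new Ref
// message (id plus inline LLB definition) when the peer supports
// proto.refarray, and the plain string id otherwise.
func encodeSingleRef(id string, def *opspb.Definition, refArraySupported bool) *gwpb.Result {
	res := &gwpb.Result{}
	if refArraySupported {
		res.Result = &gwpb.Result_Ref{Ref: &gwpb.Ref{Id: id, Def: def}}
	} else {
		res.Result = &gwpb.Result_RefDeprecated{RefDeprecated: id}
	}
	return res
}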

+ 6 - 4
vendor/github.com/moby/buildkit/frontend/result.go

@@ -1,14 +1,16 @@
 package frontend
 package frontend
 
 
-import "github.com/moby/buildkit/solver"
+import (
+	"github.com/moby/buildkit/solver"
+)
 
 
 type Result struct {
 type Result struct {
-	Ref      solver.CachedResult
-	Refs     map[string]solver.CachedResult
+	Ref      solver.ResultProxy
+	Refs     map[string]solver.ResultProxy
 	Metadata map[string][]byte
 	Metadata map[string][]byte
 }
 }
 
 
-func (r *Result) EachRef(fn func(solver.CachedResult) error) (err error) {
+func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) {
 	if r.Ref != nil {
 	if r.Ref != nil {
 		err = fn(r.Ref)
 		err = fn(r.Ref)
 	}
 	}

+ 8 - 6
vendor/github.com/moby/buildkit/go.mod

@@ -1,23 +1,25 @@
 module github.com/moby/buildkit
 module github.com/moby/buildkit
 
 
-go 1.11
+go 1.12
 
 
 require (
 require (
 	github.com/BurntSushi/toml v0.3.1
 	github.com/BurntSushi/toml v0.3.1
 	github.com/Microsoft/go-winio v0.4.14
 	github.com/Microsoft/go-winio v0.4.14
+	github.com/Microsoft/hcsshim v0.8.5 // indirect
 	github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
 	github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
 	github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
 	github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
 	github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601 // indirect
 	github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601 // indirect
 	github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50
 	github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50
 	github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf
 	github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf
-	github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6
+	github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41
 	github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
 	github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
 	github.com/containerd/go-cni v0.0.0-20190813230227-49fbd9b210f3
 	github.com/containerd/go-cni v0.0.0-20190813230227-49fbd9b210f3
 	github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda
 	github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda
 	github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 // indirect
 	github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 // indirect
 	github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd // indirect
 	github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd // indirect
 	github.com/containernetworking/cni v0.7.1 // indirect
 	github.com/containernetworking/cni v0.7.1 // indirect
-	github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a
+	github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect
+	github.com/coreos/go-systemd/v22 v22.0.0
 	github.com/docker/cli v0.0.0-20190321234815-f40f9c240ab0
 	github.com/docker/cli v0.0.0-20190321234815-f40f9c240ab0
 	github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
 	github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
 	github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c
 	github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c
@@ -53,9 +55,9 @@ require (
 	github.com/prometheus/procfs v0.0.3 // indirect
 	github.com/prometheus/procfs v0.0.3 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
 	github.com/sirupsen/logrus v1.4.1
 	github.com/sirupsen/logrus v1.4.1
-	github.com/stretchr/testify v1.3.0
+	github.com/stretchr/testify v1.4.0
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
-	github.com/tonistiigi/fsutil v0.0.0-20190819224149-3d2716dd0a4d
+	github.com/tonistiigi/fsutil v0.0.0-20200128191323-6c909ab392c1
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
 	github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
 	github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
 	github.com/uber/jaeger-lib v1.2.1 // indirect
 	github.com/uber/jaeger-lib v1.2.1 // indirect
@@ -63,7 +65,7 @@ require (
 	github.com/vishvananda/netlink v1.0.0 // indirect
 	github.com/vishvananda/netlink v1.0.0 // indirect
 	github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
 	github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
 	go.etcd.io/bbolt v1.3.3
 	go.etcd.io/bbolt v1.3.3
-	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+	golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6
 	golang.org/x/net v0.0.0-20190522155817-f3200d17e092
 	golang.org/x/net v0.0.0-20190522155817-f3200d17e092
 	golang.org/x/sync v0.0.0-20190423024810-112230192c58
 	golang.org/x/sync v0.0.0-20190423024810-112230192c58
 	golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e
 	golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e

+ 4 - 0
vendor/github.com/moby/buildkit/session/sshforward/ssh.go

@@ -75,6 +75,10 @@ func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockP
 		}
 		}
 	}()
 	}()
 
 
+	if err := os.Chmod(dir, 0711); err != nil {
+		return "", nil, errors.WithStack(err)
+	}
+
 	sockPath = filepath.Join(dir, "ssh_auth_sock")
 	sockPath = filepath.Join(dir, "ssh_auth_sock")
 
 
 	l, err := net.Listen("unix", sockPath)
 	l, err := net.Listen("unix", sockPath)

+ 3 - 0
vendor/github.com/moby/buildkit/solver/combinedcache.go

@@ -87,6 +87,9 @@ func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (res
 			}
 			}
 		}
 		}
 	}
 	}
+	if len(results) == 0 { // TODO: handle gracefully
+		return nil, errors.Errorf("failed to load deleted cache")
+	}
 	return results[0].Result, nil
 	return results[0].Result, nil
 }
 }
 
 

+ 144 - 57
vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go

@@ -9,14 +9,17 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/platforms"
+	"github.com/mitchellh/hashstructure"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache/remotecache"
 	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
-	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/moby/buildkit/worker"
 	"github.com/moby/buildkit/worker"
 	digest "github.com/opencontainers/go-digest"
 	digest "github.com/opencontainers/go-digest"
@@ -37,27 +40,28 @@ type llbBridge struct {
 	sm                        *session.Manager
 	sm                        *session.Manager
 }
 }
 
 
-func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {
+func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, error) {
 	w, err := b.resolveWorker()
 	w, err := b.resolveWorker()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	ent, err := loadEntitlements(b.builder)
+	if err != nil {
+		return nil, err
+	}
 	var cms []solver.CacheManager
 	var cms []solver.CacheManager
-	for _, im := range req.CacheImports {
+	for _, im := range cacheImports {
+		cmID, err := cmKey(im)
+		if err != nil {
+			return nil, err
+		}
 		b.cmsMu.Lock()
 		b.cmsMu.Lock()
 		var cm solver.CacheManager
 		var cm solver.CacheManager
-		cmId := identity.NewID()
-		if im.Type == "registry" {
-			// For compatibility with < v0.4.0
-			if ref := im.Attrs["ref"]; ref != "" {
-				cmId = ref
-			}
-		}
-		if prevCm, ok := b.cms[cmId]; !ok {
-			func(cmId string, im gw.CacheOptionsEntry) {
-				cm = newLazyCacheManager(cmId, func() (solver.CacheManager, error) {
+		if prevCm, ok := b.cms[cmID]; !ok {
+			func(cmID string, im gw.CacheOptionsEntry) {
+				cm = newLazyCacheManager(cmID, func() (solver.CacheManager, error) {
 					var cmNew solver.CacheManager
 					var cmNew solver.CacheManager
-					if err := inVertexContext(b.builder.Context(ctx), "importing cache manifest from "+cmId, "", func(ctx context.Context) error {
+					if err := inVertexContext(b.builder.Context(context.TODO()), "importing cache manifest from "+cmID, "", func(ctx context.Context) error {
 						resolveCI, ok := b.resolveCacheImporterFuncs[im.Type]
 						resolveCI, ok := b.resolveCacheImporterFuncs[im.Type]
 						if !ok {
 						if !ok {
 							return errors.Errorf("unknown cache importer: %s", im.Type)
 							return errors.Errorf("unknown cache importer: %s", im.Type)
@@ -66,63 +70,70 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
 						if err != nil {
 						if err != nil {
 							return err
 							return err
 						}
 						}
-						cmNew, err = ci.Resolve(ctx, desc, cmId, w)
+						cmNew, err = ci.Resolve(ctx, desc, cmID, w)
 						return err
 						return err
 					}); err != nil {
 					}); err != nil {
-						logrus.Debugf("error while importing cache manifest from cmId=%s: %v", cmId, err)
+						logrus.Debugf("error while importing cache manifest from cmId=%s: %v", cmID, err)
 						return nil, err
 						return nil, err
 					}
 					}
 					return cmNew, nil
 					return cmNew, nil
 				})
 				})
-			}(cmId, im)
-			b.cms[cmId] = cm
+			}(cmID, im)
+			b.cms[cmID] = cm
 		} else {
 		} else {
 			cm = prevCm
 			cm = prevCm
 		}
 		}
 		cms = append(cms, cm)
 		cms = append(cms, cm)
 		b.cmsMu.Unlock()
 		b.cmsMu.Unlock()
 	}
 	}
+	dpc := &detectPrunedCacheID{}
 
 
-	if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" {
-		return nil, errors.New("cannot solve with both Definition and Frontend specified")
+	edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to load LLB")
 	}
 	}
 
 
-	if req.Definition != nil && req.Definition.Def != nil {
-		ent, err := loadEntitlements(b.builder)
-		if err != nil {
-			return nil, err
+	if len(dpc.ids) > 0 {
+		ids := make([]string, 0, len(dpc.ids))
+		for id := range dpc.ids {
+			ids = append(ids, id)
 		}
 		}
-		dpc := &detectPrunedCacheID{}
-
-		edge, err := Load(req.Definition, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to load LLB")
+		if err := b.eachWorker(func(w worker.Worker) error {
+			return w.PruneCacheMounts(ctx, ids)
+		}); err != nil {
+			return nil, err
 		}
 		}
+	}
 
 
-		if len(dpc.ids) > 0 {
-			ids := make([]string, 0, len(dpc.ids))
-			for id := range dpc.ids {
-				ids = append(ids, id)
-			}
-			if err := b.eachWorker(func(w worker.Worker) error {
-				return w.PruneCacheMounts(ctx, ids)
-			}); err != nil {
-				return nil, err
-			}
+	res, err := b.builder.Build(ctx, edge)
+	if err != nil {
+		return nil, err
+	}
+	wr, ok := res.Sys().(*worker.WorkerRef)
+	if !ok {
+		return nil, errors.Errorf("invalid reference for exporting: %T", res.Sys())
+	}
+	if wr.ImmutableRef != nil {
+		if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
+			return nil, err
 		}
 		}
+	}
+	return res, err
+}
 
 
-		ref, err := b.builder.Build(ctx, edge)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to build LLB")
-		}
+func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {
+	if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" {
+		return nil, errors.New("cannot solve with both Definition and Frontend specified")
+	}
 
 
-		res = &frontend.Result{Ref: ref}
+	if req.Definition != nil && req.Definition.Def != nil {
+		res = &frontend.Result{Ref: newResultProxy(b, req)}
 	} else if req.Frontend != "" {
 	} else if req.Frontend != "" {
 		f, ok := b.frontends[req.Frontend]
 		f, ok := b.frontends[req.Frontend]
 		if !ok {
 		if !ok {
 			return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
 			return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
 		}
 		}
-		res, err = f.Solve(ctx, b, req.FrontendOpt)
+		res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs)
 		if err != nil {
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
 			return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
 		}
 		}
@@ -130,21 +141,86 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *
 		return &frontend.Result{}, nil
 		return &frontend.Result{}, nil
 	}
 	}
 
 
-	if err := res.EachRef(func(r solver.CachedResult) error {
-		wr, ok := r.Sys().(*worker.WorkerRef)
-		if !ok {
-			return errors.Errorf("invalid reference for exporting: %T", r.Sys())
+	return
+}
+
+type resultProxy struct {
+	cb       func(context.Context) (solver.CachedResult, error)
+	def      *pb.Definition
+	g        flightcontrol.Group
+	mu       sync.Mutex
+	released bool
+	v        solver.CachedResult
+	err      error
+}
+
+func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy {
+	return &resultProxy{
+		def: req.Definition,
+		cb: func(ctx context.Context) (solver.CachedResult, error) {
+			return b.loadResult(ctx, req.Definition, req.CacheImports)
+		},
+	}
+}
+
+func (rp *resultProxy) Definition() *pb.Definition {
+	return rp.def
+}
+
+func (rp *resultProxy) Release(ctx context.Context) error {
+	rp.mu.Lock()
+	defer rp.mu.Unlock()
+	if rp.v != nil {
+		if rp.released {
+			logrus.Warnf("release of already released result")
 		}
 		}
-		if wr.ImmutableRef != nil {
-			if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
-				return err
+		if err := rp.v.Release(ctx); err != nil {
+			return err
+		}
+	}
+	rp.released = true
+	return nil
+}
+
+func (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) {
+	r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) {
+		rp.mu.Lock()
+		if rp.released {
+			rp.mu.Unlock()
+			return nil, errors.Errorf("accessing released result")
+		}
+		if rp.v != nil || rp.err != nil {
+			rp.mu.Unlock()
+			return rp.v, rp.err
+		}
+		rp.mu.Unlock()
+		v, err := rp.cb(ctx)
+		if err != nil {
+			select {
+			case <-ctx.Done():
+				if strings.Contains(err.Error(), context.Canceled.Error()) {
+					return v, err
+				}
+			default:
 			}
 			}
 		}
 		}
-		return nil
-	}); err != nil {
-		return nil, err
+		rp.mu.Lock()
+		if rp.released {
+			if v != nil {
+				v.Release(context.TODO())
+			}
+			rp.mu.Unlock()
+			return nil, errors.Errorf("evaluating released result")
+		}
+		rp.v = v
+		rp.err = err
+		rp.mu.Unlock()
+		return v, err
+	})
+	if r != nil {
+		return r.(solver.CachedResult), nil
 	}
 	}
-	return
+	return nil, err
 }
 }
 
 
 func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) {
 func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) {
@@ -158,7 +234,7 @@ func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.Imm
 	return err
 	return err
 }
 }
 
 
-func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {
+func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {
 	w, err := s.resolveWorker()
 	w, err := s.resolveWorker()
 	if err != nil {
 	if err != nil {
 		return "", nil, err
 		return "", nil, err
@@ -235,3 +311,14 @@ func newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solv
 	}()
 	}()
 	return lcm
 	return lcm
 }
 }
+
+func cmKey(im gw.CacheOptionsEntry) (string, error) {
+	if im.Type == "registry" && im.Attrs["ref"] != "" {
+		return im.Attrs["ref"], nil
+	}
+	i, err := hashstructure.Hash(im, nil)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s:%d", im.Type, i), nil
+}
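
Note: resultProxy above defers the build until Result is first called, deduplicates concurrent callers, and memoizes the outcome. A stripped-down sketch of that pattern (without the release and cancellation handling of the real code), using only the flightcontrol package from this tree:

package lazyexample

import (
	"context"
	"sync"

	"github.com/moby/buildkit/util/flightcontrol"
)

// lazy runs fn at most once, even under concurrent callers, and caches both
// the value and the error for later calls.
type lazy struct {
	g    flightcontrol.Group
	mu   sync.Mutex
	v    interface{}
	err  error
	done bool
}

func (l *lazy) Value(ctx context.Context, fn func(context.Context) (interface{}, error)) (interface{}, error) {
	return l.g.Do(ctx, "value", func(ctx context.Context) (interface{}, error) {
		l.mu.Lock()
		if l.done {
			defer l.mu.Unlock()
			return l.v, l.err
		}
		l.mu.Unlock()
		v, err := fn(ctx)
		l.mu.Lock()
		l.v, l.err, l.done = v, err, true
		l.mu.Unlock()
		return v, err
	})
}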

+ 70 - 25
vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go

@@ -26,33 +26,58 @@ func timestampToTime(ts int64) *time.Time {
 	return &tm
 	return &tm
 }
 }
 
 
-func mapUser(user *copy.ChownOpt, idmap *idtools.IdentityMapping) (*copy.ChownOpt, error) {
-	if idmap == nil || user == nil {
-		return user, nil
-	}
-	identity, err := idmap.ToHost(idtools.Identity{
-		UID: user.Uid,
-		GID: user.Gid,
-	})
-	if err != nil {
-		return nil, err
+func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Chowner, error) {
+	if user == nil {
+		return func(old *copy.User) (*copy.User, error) {
+			if old == nil {
+				if idmap == nil {
+					return nil, nil
+				}
+				old = &copy.User{} // root
+			}
+			if idmap != nil {
+				identity, err := idmap.ToHost(idtools.Identity{
+					UID: old.Uid,
+					GID: old.Gid,
+				})
+				if err != nil {
+					return nil, err
+				}
+				return &copy.User{Uid: identity.UID, Gid: identity.GID}, nil
+			}
+			return old, nil
+		}, nil
+	}
+	u := *user
+	if idmap != nil {
+		identity, err := idmap.ToHost(idtools.Identity{
+			UID: user.Uid,
+			GID: user.Gid,
+		})
+		if err != nil {
+			return nil, err
+		}
+		u.Uid = identity.UID
+		u.Gid = identity.GID
 	}
 	}
-	return &copy.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
+	return func(*copy.User) (*copy.User, error) {
+		return &u, nil
+	}, nil
 }
 }
 
 
-func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
+func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error {
 	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
 	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	user, err = mapUser(user, idmap)
+	ch, err := mapUserToChowner(user, idmap)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	if action.MakeParents {
 	if action.MakeParents {
-		if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, user, timestampToTime(action.Timestamp)); err != nil {
+		if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, ch, timestampToTime(action.Timestamp)); err != nil {
 			return err
 			return err
 		}
 		}
 	} else {
 	} else {
@@ -62,7 +87,7 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.
 			}
 			}
 			return err
 			return err
 		}
 		}
-		if err := copy.Chown(p, user); err != nil {
+		if err := copy.Chown(p, nil, ch); err != nil {
 			return err
 			return err
 		}
 		}
 		if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
 		if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
@@ -73,13 +98,13 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.
 	return nil
 	return nil
 }
 }
 
 
-func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
+func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.User, idmap *idtools.IdentityMapping) error {
 	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
 	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	user, err = mapUser(user, idmap)
+	ch, err := mapUserToChowner(user, idmap)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -88,7 +113,7 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *cop
 		return err
 		return err
 	}
 	}
 
 
-	if err := copy.Chown(p, user); err != nil {
+	if err := copy.Chown(p, nil, ch); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -100,13 +125,33 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *cop
 }
 }
 
 
 func rm(ctx context.Context, d string, action pb.FileActionRm) error {
 func rm(ctx context.Context, d string, action pb.FileActionRm) error {
-	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
+	if action.AllowWildcard {
+		src := cleanPath(action.Path)
+		m, err := copy.ResolveWildcards(d, src, false)
+		if err != nil {
+			return err
+		}
+
+		for _, s := range m {
+			if err := rmPath(d, s, action.AllowNotFound); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	return rmPath(d, action.Path, action.AllowNotFound)
+}
+
+func rmPath(root, src string, allowNotFound bool) error {
+	p, err := fs.RootPath(root, filepath.Join(filepath.Join("/", src)))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	if err := os.RemoveAll(p); err != nil {
 	if err := os.RemoveAll(p); err != nil {
-		if os.IsNotExist(errors.Cause(err)) && action.AllowNotFound {
+		if os.IsNotExist(errors.Cause(err)) && allowNotFound {
 			return nil
 			return nil
 		}
 		}
 		return err
 		return err
@@ -115,7 +160,7 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error {
 	return nil
 	return nil
 }
 }
 
 
-func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
+func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.User, idmap *idtools.IdentityMapping) error {
 	srcPath := cleanPath(action.Src)
 	srcPath := cleanPath(action.Src)
 	destPath := cleanPath(action.Dest)
 	destPath := cleanPath(action.Dest)
 
 
@@ -134,14 +179,14 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 		return nil
 		return nil
 	}
 	}
 
 
-	u, err := mapUser(u, idmap)
+	ch, err := mapUserToChowner(u, idmap)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	opt := []copy.Opt{
 	opt := []copy.Opt{
 		func(ci *copy.CopyInfo) {
 		func(ci *copy.CopyInfo) {
-			ci.Chown = u
+			ci.Chown = ch
 			ci.Utime = timestampToTime(action.Timestamp)
 			ci.Utime = timestampToTime(action.Timestamp)
 			if m := int(action.Mode); m != -1 {
 			if m := int(action.Mode); m != -1 {
 				ci.Mode = &m
 				ci.Mode = &m
@@ -154,7 +199,7 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 
 
 	if !action.AllowWildcard {
 	if !action.AllowWildcard {
 		if action.AttemptUnpackDockerCompatibility {
 		if action.AttemptUnpackDockerCompatibility {
-			if ok, err := unpack(ctx, src, srcPath, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil {
+			if ok, err := unpack(ctx, src, srcPath, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil {
 				return err
 				return err
 			} else if ok {
 			} else if ok {
 				return nil
 				return nil
@@ -177,7 +222,7 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 
 
 	for _, s := range m {
 	for _, s := range m {
 		if action.AttemptUnpackDockerCompatibility {
 		if action.AttemptUnpackDockerCompatibility {
-			if ok, err := unpack(ctx, src, s, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil {
+			if ok, err := unpack(ctx, src, s, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil {
 				return err
 				return err
 			} else if ok {
 			} else if ok {
 				continue
 				continue
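
Note: the copy.Chowner returned by mapUserToChowner above is a callback that receives the existing owner and returns the owner to apply, which lets copies preserve ownership by default while still honouring an explicit chown or identity mapping. A tiny illustration of the callback shape, using the fsutil types referenced in this diff (the fallback uid/gid is an arbitrary example):

package chownerexample

import (
	copy "github.com/tonistiigi/fsutil/copy"
)

// preserveOrDefault keeps the original owner when one exists and falls back
// to a fixed uid/gid otherwise.
func preserveOrDefault(uid, gid int) copy.Chowner {
	return func(old *copy.User) (*copy.User, error) {
		if old != nil {
			return old, nil // keep ownership of the copied file
		}
		return &copy.User{Uid: uid, Gid: gid}, nil
	}
}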

+ 2 - 2
vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go

@@ -12,7 +12,7 @@ import (
 	copy "github.com/tonistiigi/fsutil/copy"
 	copy "github.com/tonistiigi/fsutil/copy"
 )
 )
 
 
-func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, user *copy.ChownOpt, tm *time.Time) (bool, error) {
+func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, ch copy.Chowner, tm *time.Time) (bool, error) {
 	src, err := fs.RootPath(srcRoot, src)
 	src, err := fs.RootPath(srcRoot, src)
 	if err != nil {
 	if err != nil {
 		return false, err
 		return false, err
@@ -25,7 +25,7 @@ func unpack(ctx context.Context, srcRoot string, src string, destRoot string, de
 	if err != nil {
 	if err != nil {
 		return false, err
 		return false, err
 	}
 	}
-	if err := copy.MkdirAll(dest, 0755, user, tm); err != nil {
+	if err := copy.MkdirAll(dest, 0755, ch, tm); err != nil {
 		return false, err
 		return false, err
 	}
 	}
 
 

+ 2 - 2
vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go

@@ -12,11 +12,11 @@ import (
 	copy "github.com/tonistiigi/fsutil/copy"
 	copy "github.com/tonistiigi/fsutil/copy"
 )
 )
 
 
-func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.ChownOpt, error) {
+func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) {
 	if chopt == nil {
 	if chopt == nil {
 		return nil, nil
 		return nil, nil
 	}
 	}
-	var us copy.ChownOpt
+	var us copy.User
 	if chopt.User != nil {
 	if chopt.User != nil {
 		switch u := chopt.User.User.(type) {
 		switch u := chopt.User.User.(type) {
 		case *pb.UserOpt_ByName:
 		case *pb.UserOpt_ByName:

+ 1 - 1
vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go

@@ -9,6 +9,6 @@ import (
 	copy "github.com/tonistiigi/fsutil/copy"
 	copy "github.com/tonistiigi/fsutil/copy"
 )
 )
 
 
-func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.ChownOpt, error) {
+func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) {
 	return nil, errors.New("only implemented in linux")
 	return nil, errors.New("only implemented in linux")
 }
 }

+ 6 - 1
vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go

@@ -132,5 +132,10 @@ func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []s
 		r.Release(context.TODO())
 		r.Release(context.TODO())
 	}
 	}
 
 
-	return []solver.Result{newRes.Ref}, err
+	r, err := newRes.Ref.Result(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return []solver.Result{r}, err
 }
 }

+ 52 - 19
vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go

@@ -56,7 +56,8 @@ type execOp struct {
 	platform  *pb.Platform
 	platform  *pb.Platform
 	numInputs int
 	numInputs int
 
 
-	cacheMounts map[string]*cacheRefShare
+	cacheMounts   map[string]*cacheRefShare
+	cacheMountsMu sync.Mutex
 }
 }
 
 
 func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
 func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
@@ -221,56 +222,75 @@ func (e *execOp) getMountDeps() ([]dep, error) {
 }
 }
 
 
 func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
 func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
+	g := &cacheRefGetter{
+		locker:          &e.cacheMountsMu,
+		cacheMounts:     e.cacheMounts,
+		cm:              e.cm,
+		md:              e.md,
+		globalCacheRefs: sharedCacheRefs,
+		name:            fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")),
+	}
+	return g.getRefCacheDir(ctx, ref, id, sharing)
+}
+
+type cacheRefGetter struct {
+	locker          sync.Locker
+	cacheMounts     map[string]*cacheRefShare
+	cm              cache.Manager
+	md              *metadata.Store
+	globalCacheRefs *cacheRefs
+	name            string
+}
+
+func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
 	key := "cache-dir:" + id
 	key := "cache-dir:" + id
 	if ref != nil {
 	if ref != nil {
 		key += ":" + ref.ID()
 		key += ":" + ref.ID()
 	}
 	}
-	mu := CacheMountsLocker()
+	mu := g.locker
 	mu.Lock()
 	mu.Lock()
 	defer mu.Unlock()
 	defer mu.Unlock()
 
 
-	if ref, ok := e.cacheMounts[key]; ok {
+	if ref, ok := g.cacheMounts[key]; ok {
 		return ref.clone(), nil
 		return ref.clone(), nil
 	}
 	}
 	defer func() {
 	defer func() {
 		if err == nil {
 		if err == nil {
 			share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
 			share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
-			e.cacheMounts[key] = share
+			g.cacheMounts[key] = share
 			mref = share.clone()
 			mref = share.clone()
 		}
 		}
 	}()
 	}()
 
 
 	switch sharing {
 	switch sharing {
 	case pb.CacheSharingOpt_SHARED:
 	case pb.CacheSharingOpt_SHARED:
-		return sharedCacheRefs.get(key, func() (cache.MutableRef, error) {
-			return e.getRefCacheDirNoCache(ctx, key, ref, id, m, false)
+		return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) {
+			return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
 		})
 		})
 	case pb.CacheSharingOpt_PRIVATE:
 	case pb.CacheSharingOpt_PRIVATE:
-		return e.getRefCacheDirNoCache(ctx, key, ref, id, m, false)
+		return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
 	case pb.CacheSharingOpt_LOCKED:
 	case pb.CacheSharingOpt_LOCKED:
-		return e.getRefCacheDirNoCache(ctx, key, ref, id, m, true)
+		return g.getRefCacheDirNoCache(ctx, key, ref, id, true)
 	default:
 	default:
 		return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
 		return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
 	}
 	}
-
 }
 }
 
 
-func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, m *pb.Mount, block bool) (cache.MutableRef, error) {
+func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) {
 	makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
 	makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
-		desc := fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
-		return e.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(desc), cache.CachePolicyRetain)
+		return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain)
 	}
 	}
 
 
 	cacheRefsLocker.Lock(key)
 	cacheRefsLocker.Lock(key)
 	defer cacheRefsLocker.Unlock(key)
 	defer cacheRefsLocker.Unlock(key)
 	for {
 	for {
-		sis, err := e.md.Search(key)
+		sis, err := g.md.Search(key)
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 		locked := false
 		locked := false
 		for _, si := range sis {
 		for _, si := range sis {
-			if mRef, err := e.cm.GetMutable(ctx, si.ID()); err == nil {
+			if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
 				logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
 				logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
 				return mRef, nil
 				return mRef, nil
 			} else if errors.Cause(err) == cache.ErrLocked {
 			} else if errors.Cause(err) == cache.ErrLocked {
@@ -295,7 +315,7 @@ func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cach
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	si, _ := e.md.Get(mRef.ID())
+	si, _ := g.md.Get(mRef.ID())
 	v, err := metadata.NewValue(key)
 	v, err := metadata.NewValue(key)
 	if err != nil {
 	if err != nil {
 		mRef.Release(context.TODO())
 		mRef.Release(context.TODO())
@@ -525,7 +545,7 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
 	return []mount.Mount{{
 	return []mount.Mount{{
 		Type:    "bind",
 		Type:    "bind",
 		Source:  fp,
 		Source:  fp,
-		Options: []string{"ro", "rbind"},
+		Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
 	}}, cleanup, nil
 	}}, cleanup, nil
 }
 }
 
 
@@ -797,6 +817,9 @@ func CacheMountsLocker() sync.Locker {
 }
 }
 
 
 func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
 func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
 	if r.shares == nil {
 	if r.shares == nil {
 		r.shares = map[string]*cacheRefShare{}
 		r.shares = map[string]*cacheRefShare{}
 	}
 	}
@@ -813,7 +836,6 @@ func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.
 
 
 	share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
 	share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
 	r.shares[key] = share
 	r.shares[key] = share
-
 	return share.clone(), nil
 	return share.clone(), nil
 }
 }
 
 
@@ -827,6 +849,9 @@ type cacheRefShare struct {
 
 
 func (r *cacheRefShare) clone() cache.MutableRef {
 func (r *cacheRefShare) clone() cache.MutableRef {
 	cacheRef := &cacheRef{cacheRefShare: r}
 	cacheRef := &cacheRef{cacheRefShare: r}
+	if cacheRefCloneHijack != nil {
+		cacheRefCloneHijack()
+	}
 	r.mu.Lock()
 	r.mu.Lock()
 	r.refs[cacheRef] = struct{}{}
 	r.refs[cacheRef] = struct{}{}
 	r.mu.Unlock()
 	r.mu.Unlock()
@@ -835,22 +860,30 @@ func (r *cacheRefShare) clone() cache.MutableRef {
 
 
 func (r *cacheRefShare) release(ctx context.Context) error {
 func (r *cacheRefShare) release(ctx context.Context) error {
 	if r.main != nil {
 	if r.main != nil {
-		r.main.mu.Lock()
-		defer r.main.mu.Unlock()
 		delete(r.main.shares, r.key)
 		delete(r.main.shares, r.key)
 	}
 	}
 	return r.MutableRef.Release(ctx)
 	return r.MutableRef.Release(ctx)
 }
 }
 
 
+var cacheRefReleaseHijack func()
+var cacheRefCloneHijack func()
+
 type cacheRef struct {
 type cacheRef struct {
 	*cacheRefShare
 	*cacheRefShare
 }
 }
 
 
 func (r *cacheRef) Release(ctx context.Context) error {
 func (r *cacheRef) Release(ctx context.Context) error {
+	if r.main != nil {
+		r.main.mu.Lock()
+		defer r.main.mu.Unlock()
+	}
 	r.mu.Lock()
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	defer r.mu.Unlock()
 	delete(r.refs, r)
 	delete(r.refs, r)
 	if len(r.refs) == 0 {
 	if len(r.refs) == 0 {
+		if cacheRefReleaseHijack != nil {
+			cacheRefReleaseHijack()
+		}
 		return r.release(ctx)
 		return r.release(ctx)
 	}
 	}
 	return nil
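
Note: the sharing modes handled by getRefCacheDir above correspond to what LLB clients request when declaring persistent cache mounts. A hedged client-side sketch, assuming the llb helpers AddMount/AsPersistentCacheDir/CacheMountShared in this tree; the cache ID, paths and command are arbitrary example values:

package cachemountexample

import "github.com/moby/buildkit/client/llb"

// withGoBuildCache attaches a persistent, shared cache mount to a build step.
func withGoBuildCache(base llb.State) llb.State {
	run := base.Run(
		llb.Shlex("go build ./..."),
		llb.AddMount("/root/.cache/go-build", llb.Scratch(),
			llb.AsPersistentCacheDir("go-build-cache", llb.CacheMountShared)),
	)
	return run.Root()
}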
 	return nil

+ 35 - 10
vendor/github.com/moby/buildkit/solver/llbsolver/solver.go

@@ -22,6 +22,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 )
 )
 
 
 const keyEntitlements = "llb.entitlements"
 const keyEntitlements = "llb.entitlements"
@@ -115,7 +116,7 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 
 
 	var res *frontend.Result
 	var res *frontend.Result
 	if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" {
 	if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" {
-		fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController)
+		fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs)
 		defer fwd.Discard()
 		defer fwd.Discard()
 		if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil {
 		if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil {
 			return nil, err
 			return nil, err
@@ -140,12 +141,24 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 	}
 	}
 
 
 	defer func() {
 	defer func() {
-		res.EachRef(func(ref solver.CachedResult) error {
+		res.EachRef(func(ref solver.ResultProxy) error {
 			go ref.Release(context.TODO())
 			go ref.Release(context.TODO())
 			return nil
 			return nil
 		})
 		})
 	}()
 	}()
 
 
+	eg, ctx2 := errgroup.WithContext(ctx)
+	res.EachRef(func(ref solver.ResultProxy) error {
+		eg.Go(func() error {
+			_, err := ref.Result(ctx2)
+			return err
+		})
+		return nil
+	})
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
 	var exporterResponse map[string]string
 	var exporterResponse map[string]string
 	if e := exp.Exporter; e != nil {
 	if e := exp.Exporter; e != nil {
 		inp := exporter.Source{
 		inp := exporter.Source{
@@ -155,13 +168,17 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 			inp.Metadata = make(map[string][]byte)
 			inp.Metadata = make(map[string][]byte)
 		}
 		}
 		if res := res.Ref; res != nil {
 		if res := res.Ref; res != nil {
-			workerRef, ok := res.Sys().(*worker.WorkerRef)
+			r, err := res.Result(ctx)
+			if err != nil {
+				return nil, err
+			}
+			workerRef, ok := r.Sys().(*worker.WorkerRef)
 			if !ok {
 			if !ok {
-				return nil, errors.Errorf("invalid reference: %T", res.Sys())
+				return nil, errors.Errorf("invalid reference: %T", r.Sys())
 			}
 			}
 			inp.Ref = workerRef.ImmutableRef
 			inp.Ref = workerRef.ImmutableRef
 
 
-			dt, err := inlineCache(ctx, exp.CacheExporter, res)
+			dt, err := inlineCache(ctx, exp.CacheExporter, r)
 			if err != nil {
 			if err != nil {
 				return nil, err
 				return nil, err
 			}
 			}
@@ -175,13 +192,17 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 				if res == nil {
 				if res == nil {
 					m[k] = nil
 					m[k] = nil
 				} else {
 				} else {
-					workerRef, ok := res.Sys().(*worker.WorkerRef)
+					r, err := res.Result(ctx)
+					if err != nil {
+						return nil, err
+					}
+					workerRef, ok := r.Sys().(*worker.WorkerRef)
 					if !ok {
 					if !ok {
-						return nil, errors.Errorf("invalid reference: %T", res.Sys())
+						return nil, errors.Errorf("invalid reference: %T", r.Sys())
 					}
 					}
 					m[k] = workerRef.ImmutableRef
 					m[k] = workerRef.ImmutableRef
 
 
-					dt, err := inlineCache(ctx, exp.CacheExporter, res)
+					dt, err := inlineCache(ctx, exp.CacheExporter, r)
 					if err != nil {
 					if err != nil {
 						return nil, err
 						return nil, err
 					}
 					}
@@ -205,9 +226,13 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
 	if e := exp.CacheExporter; e != nil {
 	if e := exp.CacheExporter; e != nil {
 		if err := inVertexContext(j.Context(ctx), "exporting cache", "", func(ctx context.Context) error {
 		if err := inVertexContext(j.Context(ctx), "exporting cache", "", func(ctx context.Context) error {
 			prepareDone := oneOffProgress(ctx, "preparing build cache for export")
 			prepareDone := oneOffProgress(ctx, "preparing build cache for export")
-			if err := res.EachRef(func(res solver.CachedResult) error {
+			if err := res.EachRef(func(res solver.ResultProxy) error {
+				r, err := res.Result(ctx)
+				if err != nil {
+					return err
+				}
 				// all keys have same export chain so exporting others is not needed
 				// all keys have same export chain so exporting others is not needed
-				_, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
+				_, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
 					Convert: workerRefConverter,
 					Convert: workerRefConverter,
 					Mode:    exp.CacheExportMode,
 					Mode:    exp.CacheExportMode,
 				})
 				})
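
Note: because frontend results are now lazy proxies, the solver above forces them all with an errgroup before exporting, so export only starts once every reference has either built or failed. The same pattern in isolation, using only the types shown in this diff:

package solverexample

import (
	"context"

	"github.com/moby/buildkit/frontend"
	"github.com/moby/buildkit/solver"
	"golang.org/x/sync/errgroup"
)

// evaluateAll forces every lazy reference in a frontend result concurrently
// and returns the first error encountered, if any.
func evaluateAll(ctx context.Context, res *frontend.Result) error {
	eg, ctx := errgroup.WithContext(ctx)
	res.EachRef(func(ref solver.ResultProxy) error {
		eg.Go(func() error {
			_, err := ref.Result(ctx)
			return err
		})
		return nil
	})
	return eg.Wait()
}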

+ 16 - 2
vendor/github.com/moby/buildkit/solver/pb/caps.go

@@ -45,7 +45,10 @@ const (
 	CapExecMountSSH                  apicaps.CapID = "exec.mount.ssh"
 	CapExecMountSSH                  apicaps.CapID = "exec.mount.ssh"
 	CapExecCgroupsMounted            apicaps.CapID = "exec.cgroup"
 	CapExecCgroupsMounted            apicaps.CapID = "exec.cgroup"
 
 
-	CapFileBase apicaps.CapID = "file.base"
+	CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1"
+
+	CapFileBase       apicaps.CapID = "file.base"
+	CapFileRmWildcard apicaps.CapID = "file.rm.wildcard"
 
 
 	CapConstraints apicaps.CapID = "constraints"
 	CapConstraints apicaps.CapID = "constraints"
 	CapPlatform    apicaps.CapID = "platform"
 	CapPlatform    apicaps.CapID = "platform"
@@ -188,6 +191,12 @@ func init() {
 		Status:  apicaps.CapStatusExperimental,
 		Status:  apicaps.CapStatusExperimental,
 	})
 	})
 
 
+	Caps.Init(apicaps.Cap{
+		ID:      CapExecMetaSecurityDeviceWhitelistV1,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
 	Caps.Init(apicaps.Cap{
 	Caps.Init(apicaps.Cap{
 		ID:      CapExecMountBind,
 		ID:      CapExecMountBind,
 		Enabled: true,
 		Enabled: true,
@@ -252,6 +261,12 @@ func init() {
 		},
 		},
 	})
 	})
 
 
+	Caps.Init(apicaps.Cap{
+		ID:      CapFileRmWildcard,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
 	Caps.Init(apicaps.Cap{
 	Caps.Init(apicaps.Cap{
 		ID:      CapConstraints,
 		ID:      CapConstraints,
 		Enabled: true,
 		Enabled: true,
@@ -281,5 +296,4 @@ func init() {
 		Enabled: true,
 		Enabled: true,
 		Status:  apicaps.CapStatusExperimental,
 		Status:  apicaps.CapStatusExperimental,
 	})
 	})
-
 }
 }

+ 64 - 18
vendor/github.com/moby/buildkit/solver/types.go

@@ -5,33 +5,48 @@ import (
 	"time"

 	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/solver/pb"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

-// Vertex is one node in the build graph
+// Vertex is a node in a build graph. It defines an interface for a
+// content-addressable operation and its inputs.
 type Vertex interface {
-	// Digest is a content-addressable vertex identifier
+	// Digest returns a checksum of the definition up to the vertex including
+	// all of its inputs.
 	Digest() digest.Digest
-	// Sys returns an internal value that is used to execute the vertex. Usually
-	// this is capured by the operation resolver method during solve.
+
+	// Sys returns an object used to resolve the executor for this vertex.
+	// In LLB solver, this value would be of type `llb.Op`.
 	Sys() interface{}
+
+	// Options return metadata associated with the vertex that doesn't change the
+	// definition or equality check of it.
 	Options() VertexOptions
-	// Array of edges current vertex depends on.
+
+	// Inputs returns an array of edges the vertex depends on. An input edge is
+	// a vertex and an index from the returned array of results from an executor
+	// returned by Sys(). A vertex may have zero inputs.
 	Inputs() []Edge
+
 	Name() string
 }

-// Index is a index value for output edge
+// Index is an index value for the return array of an operation. Index starts
+// counting from zero.
 type Index int

-// Edge is a path to a specific output of the vertex
+// Edge is a connection point between vertexes. An edge references a specific
+// output of a vertex's operation. Edges are used as inputs to other vertexes.
 type Edge struct {
 	Index  Index
 	Vertex Vertex
 }

-// VertexOptions has optional metadata for the vertex that is not contained in digest
+// VertexOptions define optional metadata for a vertex that doesn't change the
+// definition or equality check of it. These options are not contained in the
+// vertex digest.
 type VertexOptions struct {
 	IgnoreCache  bool
 	CacheSources []CacheManager
@@ -53,6 +68,12 @@ type CachedResult interface {
 	CacheKeys() []ExportableCacheKey
 }

+type ResultProxy interface {
+	Result(context.Context) (CachedResult, error)
+	Release(context.Context) error
+	Definition() *pb.Definition
+}
+
 // CacheExportMode is the type for setting cache exporting modes
 type CacheExportMode int

@@ -110,26 +131,44 @@ type CacheLink struct {
 	Selector digest.Digest `json:",omitempty"`
 }

-// Op is an implementation for running a vertex
+// Op defines how the solver can evaluate the properties of a vertex operation.
+// An op is executed in the worker, and is retrieved from the vertex by the
+// value of `vertex.Sys()`. The solver is configured with a resolve function to
+// convert a `vertex.Sys()` into an `Op`.
 type Op interface {
 	// CacheMap returns structure describing how the operation is cached.
 	// Currently only roots are allowed to return multiple cache maps per op.
 	CacheMap(context.Context, int) (*CacheMap, bool, error)
+
 	// Exec runs an operation given results from previous operations.
 	Exec(ctx context.Context, inputs []Result) (outputs []Result, err error)
 }

 type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error)

+// CacheMap is a description for calculating the cache key of an operation.
 type CacheMap struct {
-	// Digest is a base digest for operation that needs to be combined with
-	// inputs cache or selectors for dependencies.
+	// Digest returns a checksum for the operation. The operation result can be
+	// cached by a checksum that combines this digest and the cache keys of the
+	// operation's inputs.
+	//
+	// For example, in LLB this digest is a manifest digest for OCI images, or
+	// commit SHA for git sources.
 	Digest digest.Digest
-	Deps   []struct {
-		// Optional digest that is merged with the cache key of the input
+
+	// Deps contain optional selectors or content-based cache functions for its
+	// inputs.
+	Deps []struct {
+		// Selector is a digest that is merged with the cache key of the input.
+		// Selectors are not merged with the result of the `ComputeDigestFunc` for
+		// this input.
 		Selector digest.Digest
-		// Optional function that returns a digest for the input based on its
-		// return value
+
+		// ComputeDigestFunc should return a digest for the input based on its return
+		// value.
+		//
+		// For example, in LLB this is invoked to calculate the cache key based on
+		// the checksum of file contents from input snapshots.
		ComputeDigestFunc ResultBasedCacheFunc
 	}
 }
@@ -152,17 +191,24 @@ type CacheRecord struct {
 	key          *CacheKey
 }

-// CacheManager implements build cache backend
+// CacheManager determines if there is a result that matches the cache keys
+// generated during the build that could be reused instead of fully
+// reevaluating the vertex and its inputs. There can be multiple cache
+// managers, and specific managers can be defined per vertex using
+// `VertexOptions`.
 type CacheManager interface {
 	// ID is used to identify cache providers that are backed by same source
-	// to avoid duplicate calls to the same provider
+	// to avoid duplicate calls to the same provider.
 	ID() string
+
 	// Query searches for cache paths from one cache key to the output of a
 	// possible match.
 	Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error)
 	Records(ck *CacheKey) ([]*CacheRecord, error)
-	// Load pulls and returns the cached result
+
+	// Load loads a cache record into a result reference.
 	Load(ctx context.Context, rec *CacheRecord) (Result, error)
+
 	// Save saves a result based on a cache key
 	Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error)
 }

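To make the documented Vertex/Edge contract concrete, here is a hypothetical in-memory implementation; the hashing in Digest is illustrative only and not BuildKit's actual scheme:

```go
package example

import (
	"fmt"

	"github.com/moby/buildkit/solver"
	digest "github.com/opencontainers/go-digest"
)

// memVertex is a toy, in-memory solver.Vertex: an operation value plus edges
// to specific outputs of other vertexes.
type memVertex struct {
	name   string
	op     interface{}   // returned by Sys(); the solver's resolver turns it into an Op
	inputs []solver.Edge // each edge is a (Vertex, output Index) pair
	opts   solver.VertexOptions
}

// Digest covers the definition including all inputs, as documented above.
func (v *memVertex) Digest() digest.Digest {
	sum := v.name
	for _, e := range v.inputs {
		sum += fmt.Sprintf("|%s@%d", e.Vertex.Digest(), e.Index)
	}
	return digest.FromString(sum)
}

func (v *memVertex) Sys() interface{}              { return v.op }
func (v *memVertex) Options() solver.VertexOptions { return v.opts }
func (v *memVertex) Inputs() []solver.Edge         { return v.inputs }
func (v *memVertex) Name() string                  { return v.name }
```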
+ 11 - 3
vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go

@@ -1,9 +1,17 @@
 package appdefaults

+import (
+	"os"
+	"path/filepath"
+)
+
 const (
-	Address   = "npipe:////./pipe/buildkitd"
-	Root      = ".buildstate"
-	ConfigDir = ""
+	Address = "npipe:////./pipe/buildkitd"
+)
+
+var (
+	Root      = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate")
+	ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd")
 )

 func UserAddress() string {

+ 163 - 0
vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go

@@ -0,0 +1,163 @@
+package security
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/oci"
+	"github.com/opencontainers/runc/libcontainer/system"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// WithInsecureSpec sets spec with All capability.
+func WithInsecureSpec() oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		addCaps := []string{
+			"CAP_FSETID",
+			"CAP_KILL",
+			"CAP_FOWNER",
+			"CAP_MKNOD",
+			"CAP_CHOWN",
+			"CAP_DAC_OVERRIDE",
+			"CAP_NET_RAW",
+			"CAP_SETGID",
+			"CAP_SETUID",
+			"CAP_SETPCAP",
+			"CAP_SETFCAP",
+			"CAP_NET_BIND_SERVICE",
+			"CAP_SYS_CHROOT",
+			"CAP_AUDIT_WRITE",
+			"CAP_MAC_ADMIN",
+			"CAP_MAC_OVERRIDE",
+			"CAP_DAC_READ_SEARCH",
+			"CAP_SYS_PTRACE",
+			"CAP_SYS_MODULE",
+			"CAP_SYSLOG",
+			"CAP_SYS_RAWIO",
+			"CAP_SYS_ADMIN",
+			"CAP_LINUX_IMMUTABLE",
+			"CAP_SYS_BOOT",
+			"CAP_SYS_NICE",
+			"CAP_SYS_PACCT",
+			"CAP_SYS_TTY_CONFIG",
+			"CAP_SYS_TIME",
+			"CAP_WAKE_ALARM",
+			"CAP_AUDIT_READ",
+			"CAP_AUDIT_CONTROL",
+			"CAP_SYS_RESOURCE",
+			"CAP_BLOCK_SUSPEND",
+			"CAP_IPC_LOCK",
+			"CAP_IPC_OWNER",
+			"CAP_LEASE",
+			"CAP_NET_ADMIN",
+			"CAP_NET_BROADCAST",
+		}
+		for _, cap := range addCaps {
+			s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap)
+			s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap)
+			s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap)
+			s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap)
+			s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap)
+		}
+		s.Linux.ReadonlyPaths = []string{}
+		s.Linux.MaskedPaths = []string{}
+		s.Process.ApparmorProfile = ""
+
+		s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{
+			{
+				Allow:  true,
+				Type:   "c",
+				Access: "rwm",
+			},
+			{
+				Allow:  true,
+				Type:   "b",
+				Access: "rwm",
+			},
+		}
+
+		if !system.RunningInUserNS() {
+			// Devices automatically mounted on insecure mode
+			s.Linux.Devices = append(s.Linux.Devices, []specs.LinuxDevice{
+				// Writes to this come out as printk's, reads export the buffered printk records. (dmesg)
+				{
+					Path:  "/dev/kmsg",
+					Type:  "c",
+					Major: 1,
+					Minor: 11,
+				},
+				// Cuse (character device in user-space)
+				{
+					Path:  "/dev/cuse",
+					Type:  "c",
+					Major: 10,
+					Minor: 203,
+				},
+				// Fuse (virtual filesystem in user-space)
+				{
+					Path:  "/dev/fuse",
+					Type:  "c",
+					Major: 10,
+					Minor: 229,
+				},
+				// Kernel-based virtual machine (hardware virtualization extensions)
+				{
+					Path:  "/dev/kvm",
+					Type:  "c",
+					Major: 10,
+					Minor: 232,
+				},
+				// TAP/TUN network device
+				{
+					Path:  "/dev/net/tun",
+					Type:  "c",
+					Major: 10,
+					Minor: 200,
+				},
+				// Loopback control device
+				{
+					Path:  "/dev/loop-control",
+					Type:  "c",
+					Major: 10,
+					Minor: 237,
+				},
+			}...)
+
+			loopID, err := getFreeLoopID()
+			if err != nil {
+				logrus.Debugf("failed to get next free loop device: %v", err)
+			}
+
+			for i := 0; i <= loopID+7; i++ {
+				s.Linux.Devices = append(s.Linux.Devices, specs.LinuxDevice{
+					Path:  fmt.Sprintf("/dev/loop%d", i),
+					Type:  "b",
+					Major: 7,
+					Minor: int64(i),
+				})
+			}
+		}
+
+		return nil
+	}
+}
+
+func getFreeLoopID() (int, error) {
+	fd, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0644)
+	if err != nil {
+		return 0, err
+	}
+	defer fd.Close()
+
+	const _LOOP_CTL_GET_FREE = 0x4C82
+	r1, _, uerr := unix.Syscall(unix.SYS_IOCTL, fd.Fd(), _LOOP_CTL_GET_FREE, 0)
+	if uerr == 0 {
+		return int(r1), nil
+	}
+	return 0, errors.Errorf("error getting free loop device: %v", uerr)
+}

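A usage sketch (not part of this diff, Linux-only): because oci.SpecOpts is just a function, WithInsecureSpec can be applied to an already-populated runtime spec; the minimal spec literal below is a hypothetical stand-in for what containerd's spec generator would normally produce, and the client/container arguments are unused by this opt:

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/util/entitlements/security"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Minimal spec with the fields WithInsecureSpec mutates already allocated.
	spec := &specs.Spec{
		Process: &specs.Process{Capabilities: &specs.LinuxCapabilities{}},
		Linux:   &specs.Linux{Resources: &specs.LinuxResources{}},
	}
	if err := security.WithInsecureSpec()(context.TODO(), nil, nil, spec); err != nil {
		log.Fatal(err)
	}
	log.Printf("device cgroup rules: %d, devices: %d",
		len(spec.Linux.Resources.Devices), len(spec.Linux.Devices))
}
```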
+ 0 - 67
vendor/github.com/moby/buildkit/util/entitlements/security_linux.go

@@ -1,67 +0,0 @@
-package entitlements
-
-import (
-	"context"
-
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/oci"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// WithInsecureSpec sets spec with All capability.
-func WithInsecureSpec() oci.SpecOpts {
-	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-		addCaps := []string{
-			"CAP_FSETID",
-			"CAP_KILL",
-			"CAP_FOWNER",
-			"CAP_MKNOD",
-			"CAP_CHOWN",
-			"CAP_DAC_OVERRIDE",
-			"CAP_NET_RAW",
-			"CAP_SETGID",
-			"CAP_SETUID",
-			"CAP_SETPCAP",
-			"CAP_SETFCAP",
-			"CAP_NET_BIND_SERVICE",
-			"CAP_SYS_CHROOT",
-			"CAP_AUDIT_WRITE",
-			"CAP_MAC_ADMIN",
-			"CAP_MAC_OVERRIDE",
-			"CAP_DAC_READ_SEARCH",
-			"CAP_SYS_PTRACE",
-			"CAP_SYS_MODULE",
-			"CAP_SYSLOG",
-			"CAP_SYS_RAWIO",
-			"CAP_SYS_ADMIN",
-			"CAP_LINUX_IMMUTABLE",
-			"CAP_SYS_BOOT",
-			"CAP_SYS_NICE",
-			"CAP_SYS_PACCT",
-			"CAP_SYS_TTY_CONFIG",
-			"CAP_SYS_TIME",
-			"CAP_WAKE_ALARM",
-			"CAP_AUDIT_READ",
-			"CAP_AUDIT_CONTROL",
-			"CAP_SYS_RESOURCE",
-			"CAP_BLOCK_SUSPEND",
-			"CAP_IPC_LOCK",
-			"CAP_IPC_OWNER",
-			"CAP_LEASE",
-			"CAP_NET_ADMIN",
-			"CAP_NET_BROADCAST",
-		}
-		for _, cap := range addCaps {
-			s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap)
-			s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap)
-			s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap)
-			s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap)
-			s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap)
-		}
-		s.Linux.ReadonlyPaths = []string{}
-		s.Linux.MaskedPaths = []string{}
-		s.Process.ApparmorProfile = ""
-
-		return nil
-	}
-}

+ 3 - 1
vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go

@@ -116,7 +116,9 @@ func newCall(fn func(ctx context.Context) (interface{}, error)) *call {

 func (c *call) run() {
 	defer c.closeProgressWriter()
-	v, err := c.fn(c.ctx)
+	ctx, cancel := context.WithCancel(c.ctx)
+	defer cancel()
+	v, err := c.fn(ctx)
 	c.mu.Lock()
 	c.result = v
 	c.err = err

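The change derives a per-call context so that anything started by fn is cancelled as soon as run returns. The same pattern, sketched outside flightcontrol with made-up names:

```go
package example

import "context"

// runOnce mirrors the pattern above: the callback gets a child context that
// is always cancelled when the call finishes, releasing anything it started.
func runOnce(parent context.Context, fn func(context.Context) (interface{}, error)) (interface{}, error) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	return fn(ctx)
}
```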
+ 2 - 1
vendor/github.com/moby/buildkit/util/imageutil/config.go

@@ -12,6 +12,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
 	"github.com/moby/buildkit/util/leaseutil"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -152,7 +153,7 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo
 			} else {
 				descs = append(descs, index.Manifests...)
 			}
-		case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig:
+		case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig, docker.LegacyConfigMediaType:
 			// childless data types.
 			return nil, nil
 		default:

+ 2 - 2
vendor/github.com/moby/buildkit/worker/worker.go

@@ -7,10 +7,10 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/frontend"
-	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	digest "github.com/opencontainers/go-digest"
@@ -26,7 +26,7 @@ type Worker interface {
 	LoadRef(id string, hidden bool) (cache.ImmutableRef, error)
 	// ResolveOp resolves Vertex.Sys() to Op implementation.
 	ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error)
-	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
 	// Exec is similar to executor.Exec but without []mount.Mount
 	Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
 	DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error)

+ 9 - 5
vendor/github.com/tonistiigi/fsutil/copy/copy.go

@@ -146,14 +146,16 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont
 	return destPath, nil
 }

-type ChownOpt struct {
+type User struct {
 	Uid, Gid int
 }

+type Chowner func(*User) (*User, error)
+
 type XAttrErrorHandler func(dst, src, xattrKey string, err error) error

 type CopyInfo struct {
-	Chown             *ChownOpt
+	Chown             Chowner
 	Utime             *time.Time
 	AllowWildcards    bool
 	Mode              *int
@@ -172,7 +174,9 @@ func WithCopyInfo(ci CopyInfo) func(*CopyInfo) {

 func WithChown(uid, gid int) Opt {
 	return func(ci *CopyInfo) {
-		ci.Chown = &ChownOpt{Uid: uid, Gid: gid}
+		ci.Chown = func(*User) (*User, error) {
+			return &User{Uid: uid, Gid: gid}, nil
+		}
 	}
 }

@@ -194,14 +198,14 @@ func AllowXAttrErrors(ci *CopyInfo) {
 }

 type copier struct {
-	chown             *ChownOpt
+	chown             Chowner
 	utime             *time.Time
 	mode              *int
 	inodes            map[uint64]string
 	xattrErrorHandler XAttrErrorHandler
 }

-func newCopier(chown *ChownOpt, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier {
+func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier {
 	if xeh == nil {
 		xeh = func(dst, src, key string, err error) error {
 			return err

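With Chown now a callback rather than a fixed ChownOpt, ownership can be computed from each file's original owner. A hedged sketch of a custom Chowner wired in through the package's own WithCopyInfo helper; the remap offset is hypothetical:

```go
package example

import (
	fs "github.com/tonistiigi/fsutil/copy"
)

// remapChowner shifts the source owner into a different ID range, e.g. for
// a user-namespaced destination. The offset value is made up.
func remapChowner(offset int) fs.Chowner {
	return func(old *fs.User) (*fs.User, error) {
		if old == nil {
			return nil, nil // returning nil keeps the file un-chowned
		}
		return &fs.User{Uid: old.Uid + offset, Gid: old.Gid + offset}, nil
	}
}

// copyOpt wires the Chowner through CopyInfo using WithCopyInfo as shown in
// the diff above.
func copyOpt() fs.Opt {
	return fs.WithCopyInfo(fs.CopyInfo{Chown: remapChowner(100000)})
}
```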
+ 7 - 5
vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go

@@ -6,7 +6,6 @@ import (
 	"os"
 	"syscall"

-	"github.com/containerd/containerd/sys"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
@@ -20,11 +19,14 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
 	st := fi.Sys().(*syscall.Stat_t)

 	chown := c.chown
+	uid, gid := getUidGid(fi)
+	old := &User{Uid: uid, Gid: gid}
 	if chown == nil {
-		uid, gid := getUidGid(fi)
-		chown = &ChownOpt{Uid: uid, Gid: gid}
+		chown = func(u *User) (*User, error) {
+			return u, nil
+		}
 	}
-	if err := Chown(name, chown); err != nil {
+	if err := Chown(name, old, chown); err != nil {
 		return errors.Wrapf(err, "failed to chown %s", name)
 	}

@@ -43,7 +45,7 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
 			return err
 		}
 	} else {
-		timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
+		timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
 		if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
 			return errors.Wrapf(err, "failed to utime %s", name)
 		}

+ 7 - 5
vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go

@@ -6,7 +6,6 @@ import (
 	"os"
 	"syscall"

-	"github.com/containerd/containerd/sys"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
@@ -19,11 +18,14 @@ func getUidGid(fi os.FileInfo) (uid, gid int) {
 func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
 	st := fi.Sys().(*syscall.Stat_t)
 	chown := c.chown
+	uid, gid := getUidGid(fi)
+	old := &User{Uid: uid, Gid: gid}
 	if chown == nil {
-		uid, gid := getUidGid(fi)
-		chown = &ChownOpt{Uid: uid, Gid: gid}
+		chown = func(u *User) (*User, error) {
+			return u, nil
+		}
 	}
-	if err := Chown(name, chown); err != nil {
+	if err := Chown(name, old, chown); err != nil {
 		return errors.Wrapf(err, "failed to chown %s", name)
 	}

@@ -42,7 +44,7 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
 			return err
 		}
 	} else {
-		timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
+		timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
 		if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
 			return errors.Wrapf(err, "failed to utime %s", name)
 		}

+ 12 - 3
vendor/github.com/tonistiigi/fsutil/copy/mkdir.go

@@ -4,9 +4,18 @@ import (
 	"os"
 	"syscall"
 	"time"
+
+	"github.com/pkg/errors"
 )

-func Chown(p string, user *ChownOpt) error {
+func Chown(p string, old *User, fn Chowner) error {
+	if fn == nil {
+		return nil
+	}
+	user, err := fn(old)
+	if err != nil {
+		return errors.WithStack(err)
+	}
 	if user != nil {
 		if err := os.Lchown(p, user.Uid, user.Gid); err != nil {
 			return err
@@ -16,7 +25,7 @@ func Chown(p string, user *ChownOpt) error {
 }

 // MkdirAll is forked os.MkdirAll
-func MkdirAll(path string, perm os.FileMode, user *ChownOpt, tm *time.Time) error {
+func MkdirAll(path string, perm os.FileMode, user Chowner, tm *time.Time) error {
 	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
 	dir, err := os.Stat(path)
 	if err == nil {
@@ -62,7 +71,7 @@ func MkdirAll(path string, perm os.FileMode, user *ChownOpt, tm *time.Time) erro
 		return err
 	}

-	if err := Chown(path, user); err != nil {
+	if err := Chown(path, nil, user); err != nil {
 		return err
 	}


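A minimal, hypothetical usage sketch of the updated helpers: MkdirAll now takes a Chowner directly, and a nil Chowner makes Chown a no-op as the diff above shows. The path and IDs are made up, and the chown only succeeds with sufficient privileges:

```go
package main

import (
	"log"
	"time"

	fs "github.com/tonistiigi/fsutil/copy"
)

func main() {
	now := time.Now()
	// Give every created directory a fixed owner.
	owner := func(*fs.User) (*fs.User, error) {
		return &fs.User{Uid: 1000, Gid: 1000}, nil
	}
	if err := fs.MkdirAll("/tmp/example/nested/dir", 0755, owner, &now); err != nil {
		log.Fatal(err)
	}
}
```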
+ 17 - 0
vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go

@@ -0,0 +1,17 @@
+// +build darwin freebsd netbsd openbsd
+
+package fs
+
+import (
+	"syscall"
+)
+
+// Returns the last-accessed time
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atimespec
+}
+
+// Returns the last-modified time
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtimespec
+}

+ 17 - 0
vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go

@@ -0,0 +1,17 @@
+// +build dragonfly linux solaris
+
+package fs
+
+import (
+	"syscall"
+)
+
+// Returns the last-accessed time
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atim
+}
+
+// Returns the last-modified time
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtim
+}

+ 0 - 2
vendor/github.com/tonistiigi/fsutil/go.mod

@@ -2,8 +2,6 @@ module github.com/tonistiigi/fsutil

 require (
 	github.com/Microsoft/go-winio v0.4.11 // indirect
-	github.com/Microsoft/hcsshim v0.8.5 // indirect
-	github.com/containerd/containerd v1.2.4
 	github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197

Some files were not shown because too many files changed in this diff