vendor: update buildkit to df35e9818

Update to new buildkit and fix upgrade bugs

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

parent d984d3053b
commit 9b28939345

85 changed files with 4131 additions and 540 deletions
@@ -395,7 +395,7 @@ func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snap
 }
 
 func (s *snapshotter) Walk(context.Context, snapshots.WalkFunc, ...string) error {
-	return errors.Errorf("not-implemented")
+	return nil
 }
 
 func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
@@ -86,7 +86,11 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, err
 	}
 
-	md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
+	if err := cache.MigrateV2(context.Background(), filepath.Join(root, "metadata.db"), filepath.Join(root, "metadata_v2.db"), store, snapshotter, lm); err != nil {
+		return nil, err
+	}
+
+	md, err := metadata.NewStore(filepath.Join(root, "metadata_v2.db"))
 	if err != nil {
 		return nil, err
 	}
@@ -194,7 +194,8 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil
 
 	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
 	if err != nil {
-		if instructions.IsUnknownInstruction(err) {
+		var uiErr *instructions.UnknownInstruction
+		if errors.As(err, &uiErr) {
 			buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
 		}
 		return nil, errdefs.InvalidParameter(err)
@@ -205,7 +205,8 @@ func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error {
 	}
 	cmd, err := instructions.ParseCommand(ast.AST.Children[0])
 	if err != nil {
-		if instructions.IsUnknownInstruction(err) {
+		var uiErr *instructions.UnknownInstruction
+		if errors.As(err, &uiErr) {
 			buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
 		}
 		return err
@@ -6103,7 +6103,7 @@ func (s *DockerSuite) TestBuildLineErrorOnBuild(c *testing.T) {
 	ONBUILD
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument",
+		Err: "parse error line 2: ONBUILD requires at least one argument",
 	})
 }
 
@@ -6117,7 +6117,7 @@ func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *testing.T) {
 	ERROR
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION",
+		Err: "parse error line 3: unknown instruction: NOINSTRUCTION",
 	})
 }
 
@@ -6134,7 +6134,7 @@ func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *testing.T) {
 	CMD ["/bin/init"]
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION",
+		Err: "parse error line 6: unknown instruction: NOINSTRUCTION",
 	})
 }
 
@@ -6148,7 +6148,7 @@ func (s *DockerSuite) TestBuildLineErrorWithComments(c *testing.T) {
 	NOINSTRUCTION echo ba
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION",
+		Err: "parse error line 5: unknown instruction: NOINSTRUCTION",
 	})
 }
@@ -28,7 +28,7 @@ github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6
 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
 
 # buildkit
-github.com/moby/buildkit ae7ff7174f73bcb4df89b97e1623b3fb0bfb0a0c
+github.com/moby/buildkit df35e9818d1f9066e616e03f4b8d727c97562e5b
 github.com/tonistiigi/fsutil c2c7d7b0e1441705cd802e5699c0a10b1dfe39fd
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -36,6 +36,7 @@ github.com/google/shlex e7afc7fbc51079733e9468cdfd1e
 github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
 github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
 github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1
+github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0
 
 # libnetwork
201 vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE generated vendored Normal file
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
85 vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md generated vendored Normal file
|
@ -0,0 +1,85 @@
|
|||
# Go gRPC Middleware
|
||||
|
||||
[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
|
||||
[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
|
||||
[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
|
||||
[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
|
||||
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
|
||||
[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
|
||||
[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
|
||||
|
||||
[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
|
||||
|
||||
## Middleware
|
||||
|
||||
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
|
||||
Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
|
||||
that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement
|
||||
common patterns: auth, logging, message, validation, retries or monitoring.
|
||||
|
||||
These are generic building blocks that make it easy to build multiple microservices easily.
|
||||
The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
|
||||
some of them itself, but also will link to useful external repos.
|
||||
|
||||
`grpc_middleware` itself provides support for chaining interceptors, here's an example:
|
||||
|
||||
```go
|
||||
import "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
|
||||
myServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
|
||||
grpc_ctxtags.StreamServerInterceptor(),
|
||||
grpc_opentracing.StreamServerInterceptor(),
|
||||
grpc_prometheus.StreamServerInterceptor,
|
||||
grpc_zap.StreamServerInterceptor(zapLogger),
|
||||
grpc_auth.StreamServerInterceptor(myAuthFunction),
|
||||
grpc_recovery.StreamServerInterceptor(),
|
||||
)),
|
||||
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
|
||||
grpc_ctxtags.UnaryServerInterceptor(),
|
||||
grpc_opentracing.UnaryServerInterceptor(),
|
||||
grpc_prometheus.UnaryServerInterceptor,
|
||||
grpc_zap.UnaryServerInterceptor(zapLogger),
|
||||
grpc_auth.UnaryServerInterceptor(myAuthFunction),
|
||||
grpc_recovery.UnaryServerInterceptor(),
|
||||
)),
|
||||
)
|
||||
```
|
||||
|
||||
## Interceptors
|
||||
|
||||
*Please send a PR to add new interceptors or middleware to this list*
|
||||
|
||||
#### Auth
|
||||
* [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
|
||||
|
||||
#### Logging
|
||||
* [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
|
||||
* [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
|
||||
* [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
|
||||
* [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
|
||||
|
||||
#### Monitoring
|
||||
* [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
|
||||
* [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
|
||||
* [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
|
||||
|
||||
#### Client
|
||||
* [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
|
||||
|
||||
#### Server
|
||||
* [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
|
||||
* [`grpc_recovery`](recovery/) - turn panics into gRPC errors
|
||||
* [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
|
||||
|
||||
Additional tooling will be added, and contributions are welcome.
|
||||
|
||||
## License
|
||||
|
||||
`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
120 vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go generated vendored Normal file
|
@ -0,0 +1,120 @@
|
|||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
// gRPC Server Interceptor chaining middleware.
|
||||
|
||||
package grpc_middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
|
||||
// will see context changes of one and two.
|
||||
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
|
||||
return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
|
||||
return currentInter(currentCtx, currentReq, info, currentHandler)
|
||||
}
|
||||
}
|
||||
|
||||
chainedHandler := handler
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
chainedHandler = chainer(interceptors[i], chainedHandler)
|
||||
}
|
||||
|
||||
return chainedHandler(ctx, req)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryServer(one, two, three) will execute one before two before three.
|
||||
// If you want to pass context between interceptors, use WrapServerStream.
|
||||
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
|
||||
return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
|
||||
return currentInter(currentSrv, currentStream, info, currentHandler)
|
||||
}
|
||||
}
|
||||
|
||||
chainedHandler := handler
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
chainedHandler = chainer(interceptors[i], chainedHandler)
|
||||
}
|
||||
|
||||
return chainedHandler(srv, ss)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryClient(one, two, three) will execute one before two before three.
|
||||
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
|
||||
return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
|
||||
return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
|
||||
}
|
||||
}
|
||||
|
||||
chainedInvoker := invoker
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
chainedInvoker = chainer(interceptors[i], chainedInvoker)
|
||||
}
|
||||
|
||||
return chainedInvoker(ctx, method, req, reply, cc, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainStreamClient(one, two, three) will execute one before two before three.
|
||||
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
|
||||
return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
|
||||
}
|
||||
}
|
||||
|
||||
chainedStreamer := streamer
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
chainedStreamer = chainer(interceptors[i], chainedStreamer)
|
||||
}
|
||||
|
||||
return chainedStreamer(ctx, desc, cc, method, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// Chain creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
|
||||
// Basically syntactic sugar.
|
||||
func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
|
||||
return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
|
||||
}
|
||||
|
||||
// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
|
||||
// Basically syntactic sugar.
|
||||
func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
|
||||
return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
|
||||
}
|
69 vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go generated vendored Normal file
|
@ -0,0 +1,69 @@
|
|||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
/*
|
||||
`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
|
||||
|
||||
Middleware
|
||||
|
||||
gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the
|
||||
upstream gRPC codebase is relatively bare bones.
|
||||
|
||||
This package, and most of its child packages provides commonly needed middleware for gRPC:
|
||||
client-side interceptors for retires, server-side interceptors for input validation and auth,
|
||||
functions for chaining said interceptors, metadata convenience methods and more.
|
||||
|
||||
Chaining
|
||||
|
||||
By default, gRPC doesn't allow one to have more than one interceptor either on the client nor on
|
||||
the server side. `grpc_middleware` provides convenient chaining methods
|
||||
|
||||
Simple way of turning a multiple interceptors into a single interceptor. Here's an example for
|
||||
server chaining:
|
||||
|
||||
myServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
|
||||
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary),
|
||||
)
|
||||
|
||||
These interceptors will be executed from left to right: logging, monitoring and auth.
|
||||
|
||||
Here's an example for client side chaining:
|
||||
|
||||
clientConn, err = grpc.Dial(
|
||||
address,
|
||||
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
|
||||
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
|
||||
)
|
||||
client = pb_testproto.NewTestServiceClient(clientConn)
|
||||
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
|
||||
|
||||
These interceptors will be executed from left to right: monitoring and then retry logic.
|
||||
|
||||
The retry interceptor will call every interceptor that follows it whenever when a retry happens.
|
||||
|
||||
Writing Your Own
|
||||
|
||||
Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting
|
||||
bit exposing common data to handlers (and other middleware), similarly to HTTP Middleware design.
|
||||
For example, you may want to pass the identity of the caller from the auth interceptor all the way
|
||||
to the handling function.
|
||||
|
||||
For example, a client side interceptor example for auth looks like:
|
||||
|
||||
func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
newCtx := context.WithValue(ctx, "user_id", "john@example.com")
|
||||
return handler(newCtx, req)
|
||||
}
|
||||
|
||||
Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
|
||||
the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
|
||||
needed. For example:
|
||||
|
||||
func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
newStream := grpc_middleware.WrapServerStream(stream)
|
||||
newStream.WrappedContext = context.WithValue(ctx, "user_id", "john@example.com")
|
||||
return handler(srv, stream)
|
||||
}
|
||||
*/
|
||||
package grpc_middleware
|
22 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod generated vendored Normal file
@@ -0,0 +1,22 @@
+module github.com/grpc-ecosystem/go-grpc-middleware
+
+require (
+	github.com/go-kit/kit v0.9.0
+	github.com/go-logfmt/logfmt v0.4.0 // indirect
+	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/gogo/protobuf v1.2.1
+	github.com/golang/protobuf v1.3.2
+	github.com/opentracing/opentracing-go v1.1.0
+	github.com/pkg/errors v0.8.1 // indirect
+	github.com/sirupsen/logrus v1.4.2
+	github.com/stretchr/testify v1.4.0
+	go.uber.org/atomic v1.4.0 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.10.0
+	golang.org/x/net v0.0.0-20190311183353-d8887717615a
+	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
+	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
+	google.golang.org/grpc v1.19.0
+)
+
+go 1.13
30 vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go generated vendored Normal file
@@ -0,0 +1,30 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_middleware
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+)
+
+// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
+type WrappedServerStream struct {
+	grpc.ServerStream
+	// WrappedContext is the wrapper's own Context. You can assign it.
+	WrappedContext context.Context
+}
+
+// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context()
+func (w *WrappedServerStream) Context() context.Context {
+	return w.WrappedContext
+}
+
+// WrapServerStream returns a ServerStream that has the ability to overwrite context.
+func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream {
+	if existing, ok := stream.(*WrappedServerStream); ok {
+		return existing
+	}
+	return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()}
+}
2 vendor/github.com/moby/buildkit/README.md generated vendored
@@ -86,6 +86,7 @@ BuildKit is used by the following projects:
 - [PouchContainer](https://github.com/alibaba/pouch)
 - [Docker buildx](https://github.com/docker/buildx)
 - [Okteto Cloud](https://okteto.com/)
+- [Earthly earthfiles](https://github.com/vladaionescu/earthly)
 
 ## Quick start
 
@@ -148,6 +149,7 @@ Currently, the following high-level languages has been implemented for LLB:
 - [Buildpacks](https://github.com/tonistiigi/buildkit-pack)
 - [Mockerfile](https://matt-rickard.com/building-a-new-dockerfile-frontend/)
 - [Gockerfile](https://github.com/po3rin/gockerfile)
+- [bldr (Pkgfile)](https://github.com/talos-systems/bldr/)
 - (open a PR to add your own language)
 
 ### Exploring Dockerfiles
5 vendor/github.com/moby/buildkit/cache/contenthash/path.go generated vendored
@@ -1,9 +1,10 @@
 package contenthash
 
 import (
-	"errors"
 	"os"
 	"path/filepath"
+
+	"github.com/pkg/errors"
 )
 
 var (
@@ -52,7 +53,7 @@ func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath st
 	fi, err := os.Lstat(realPath)
 	if err != nil {
 		// If path does not yet exist, treat as non-symlink
-		if os.IsNotExist(err) {
+		if errors.Is(err, os.ErrNotExist) {
 			return path, false, nil
 		}
 		return "", false, err
2 vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go generated vendored
@@ -39,7 +39,7 @@ func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
 	// Get extended attributes.
 	xAttrKeys := make([]string, len(h.Xattrs))
 	for k := range h.Xattrs {
-		if !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") {
+		if k == "security.capability" || !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") {
 			xAttrKeys = append(xAttrKeys, k)
 		}
 	}
14 vendor/github.com/moby/buildkit/cache/manager.go generated vendored
@@ -16,7 +16,7 @@ import (
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/snapshot"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -143,7 +143,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor,
 
 	for _, si := range sis {
 		ref, err := cm.get(ctx, si.ID(), opts...)
-		if err != nil && errors.Cause(err) != errNotFound {
+		if err != nil && !IsNotFound(err) {
 			return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", si.ID())
 		}
 		if p != nil {
@@ -160,7 +160,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor,
 	var link ImmutableRef
 	for _, si := range sis {
 		ref, err := cm.get(ctx, si.ID(), opts...)
-		if err != nil && errors.Cause(err) != errNotFound {
+		if err != nil && !IsNotFound(err) {
 			return nil, errors.Wrapf(err, "failed to get record %s by chainid", si.ID())
 		}
 		link = ref
@@ -338,7 +338,7 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt
 	mutable, err := cm.getRecord(ctx, mutableID)
 	if err != nil {
 		// check loading mutable deleted record from disk
-		if errors.Cause(err) == errNotFound {
+		if IsNotFound(err) {
 			cm.md.Clear(id)
 		}
 		return nil, err
@@ -906,12 +906,8 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
 	return du, nil
 }
 
-func IsLocked(err error) bool {
-	return errors.Cause(err) == ErrLocked
-}
-
 func IsNotFound(err error) bool {
-	return errors.Cause(err) == errNotFound
+	return errors.Is(err, errNotFound)
 }
 
 type RefOption interface{}
37 vendor/github.com/moby/buildkit/cache/migrate_v2.go generated vendored
@@ -13,7 +13,7 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/snapshot"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -56,7 +56,7 @@ func migrateChainID(si *metadata.StorageItem, all map[string]*metadata.StorageIt
 func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapshot.Snapshotter, lm leases.Manager) error {
 	_, err := os.Stat(to)
 	if err != nil {
-		if !os.IsNotExist(errors.Cause(err)) {
+		if !errors.Is(err, os.ErrNotExist) {
 			return errors.WithStack(err)
 		}
 	} else {
@@ -65,7 +65,7 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho
 
 	_, err = os.Stat(from)
 	if err != nil {
-		if !os.IsNotExist(errors.Cause(err)) {
+		if !errors.Is(err, os.ErrNotExist) {
 			return errors.WithStack(err)
 		}
 		return nil
@@ -180,7 +180,7 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho
 	})
 	if err != nil {
 		// if we are running the migration twice
-		if errdefs.IsAlreadyExists(err) {
+		if errors.Is(err, errdefs.ErrAlreadyExists) {
 			continue
 		}
 		return errors.Wrap(err, "failed to create lease")
@@ -205,19 +205,22 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho

	// remove old root labels
	for _, item := range byID {
	if _, err := s.Update(ctx, snapshots.Info{
	Name: getSnapshotID(item),
	}, "labels.containerd.io/gc.root"); err != nil {
	if !errdefs.IsNotFound(errors.Cause(err)) {
	return err
	}
	}

	if blob := getBlob(item); blob != "" {
	if _, err := cs.Update(ctx, content.Info{
	Digest: digest.Digest(blob),
	em := getEqualMutable(item)
	if em == "" {
	if _, err := s.Update(ctx, snapshots.Info{
	Name: getSnapshotID(item),
	}, "labels.containerd.io/gc.root"); err != nil {
	return err
	if !errors.Is(err, errdefs.ErrNotFound) {
	return err
	}
	}

	if blob := getBlob(item); blob != "" {
	if _, err := cs.Update(ctx, content.Info{
	Digest: digest.Digest(blob),
	}, "labels.containerd.io/gc.root"); err != nil {
	return err
	}
	}
	}
	}
@@ -228,7 +231,7 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho
 	if _, err := s.Update(ctx, snapshots.Info{
 		Name: info.Name,
 	}, "labels.containerd.io/gc.root"); err != nil {
-		if !errdefs.IsNotFound(errors.Cause(err)) {
+		if !errors.Is(err, errdefs.ErrNotFound) {
 			return err
 		}
 	}
13 vendor/github.com/moby/buildkit/cache/refs.go generated vendored
@@ -17,7 +17,7 @@ import (
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/leaseutil"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -160,7 +160,7 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
 	if isDead {
 		return int64(0), nil
 	}
-	if !errdefs.IsNotFound(err) {
+	if !errors.Is(err, errdefs.ErrNotFound) {
 		return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID())
 	}
 }
@@ -180,7 +180,10 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
 		cr.mu.Unlock()
 		return usage.Size, nil
 	})
-	return s.(int64), err
+	if err != nil {
+		return 0, err
+	}
+	return s.(int64), nil
 }
 
 func (cr *cacheRecord) Parent() ImmutableRef {
@@ -349,7 +352,7 @@ func (sr *immutableRef) Extract(ctx context.Context) error {
 		return nil, err
 	}
 	if err := sr.cm.Snapshotter.Commit(ctx, getSnapshotID(sr.md), key); err != nil {
-		if !errdefs.IsAlreadyExists(err) {
+		if !errors.Is(err, errdefs.ErrAlreadyExists) {
 			return nil, err
 		}
 	}
@@ -506,7 +509,7 @@ func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error {
 		return nil
 	})
 	if err != nil {
-		if !errdefs.IsAlreadyExists(err) { // migrator adds leases for everything
+		if !errors.Is(err, errdefs.ErrAlreadyExists) { // migrator adds leases for everything
 			return errors.Wrap(err, "failed to create lease")
 		}
 	}
2 vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go generated vendored
@@ -10,7 +10,7 @@ import (
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/resolver"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
27 vendor/github.com/moby/buildkit/client/client.go generated vendored
@@ -8,12 +8,14 @@ import (
 	"net"
 	"time"
 
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client/connhelper"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/grpchijack"
 	"github.com/moby/buildkit/util/appdefaults"
+	"github.com/moby/buildkit/util/grpcerrors"
 	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
@@ -31,6 +33,10 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	gopts := []grpc.DialOption{}
 	needDialer := true
 	needWithInsecure := true
+
+	var unary []grpc.UnaryClientInterceptor
+	var stream []grpc.StreamClientInterceptor
+
 	for _, o := range opts {
 		if _, ok := o.(*withFailFast); ok {
 			gopts = append(gopts, grpc.FailOnNonTempDialError(true))
@@ -44,9 +50,8 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 			needWithInsecure = false
 		}
 		if wt, ok := o.(*withTracer); ok {
-			gopts = append(gopts,
-				grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
-				grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
+			unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads()))
+			stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
 		}
 		if wd, ok := o.(*withDialer); ok {
 			gopts = append(gopts, grpc.WithDialer(wd.dialer))
@@ -68,6 +73,22 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	if address == "" {
 		address = appdefaults.Address
 	}
+
+	unary = append(unary, grpcerrors.UnaryClientInterceptor)
+	stream = append(stream, grpcerrors.StreamClientInterceptor)
+
+	if len(unary) == 1 {
+		gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0]))
+	} else if len(unary) > 1 {
+		gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
+	}
+
+	if len(stream) == 1 {
+		gopts = append(gopts, grpc.WithStreamInterceptor(stream[0]))
+	} else if len(stream) > 1 {
+		gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
+	}
+
 	conn, err := grpc.DialContext(ctx, address, gopts...)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
3 vendor/github.com/moby/buildkit/client/connhelper/connhelper.go generated vendored
@@ -1,4 +1,5 @@
-// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
+// Package connhelper provides helpers for connecting to a remote daemon host
+// with custom logic.
 package connhelper
 
 import (
6 vendor/github.com/moby/buildkit/client/llb/async.go generated vendored
@@ -61,7 +61,7 @@ func (as *asyncState) Do(ctx context.Context) error {
 	if err != nil {
 		select {
 		case <-ctx.Done():
-			if errors.Cause(err) == ctx.Err() {
+			if errors.Is(err, ctx.Err()) {
 				return res, err
 			}
 		default:
@@ -85,8 +85,8 @@ type errVertex struct {
 func (v *errVertex) Validate(context.Context) error {
 	return v.err
 }
-func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
-	return "", nil, nil, v.err
+func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
+	return "", nil, nil, nil, v.err
 }
 func (v *errVertex) Output() Output {
 	return nil
42 vendor/github.com/moby/buildkit/client/llb/definition.go generated vendored
|
@ -20,6 +20,7 @@ type DefinitionOp struct {
|
|||
ops map[digest.Digest]*pb.Op
|
||||
defs map[digest.Digest][]byte
|
||||
metas map[digest.Digest]pb.OpMetadata
|
||||
sources map[digest.Digest][]*SourceLocation
|
||||
platforms map[digest.Digest]*specs.Platform
|
||||
dgst digest.Digest
|
||||
index pb.OutputIndex
|
||||
|
@ -49,6 +50,38 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
|
|||
platforms[dgst] = platform
|
||||
}
|
||||
|
||||
srcs := map[digest.Digest][]*SourceLocation{}
|
||||
|
||||
if def.Source != nil {
|
||||
sourceMaps := make([]*SourceMap, len(def.Source.Infos))
|
||||
for i, info := range def.Source.Infos {
|
||||
var st *State
|
||||
sdef := info.Definition
|
||||
if sdef != nil {
|
||||
op, err := NewDefinitionOp(sdef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state := NewState(op)
|
||||
st = &state
|
||||
}
|
||||
sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data)
|
||||
}
|
||||
|
||||
for dgst, locs := range def.Source.Locations {
|
||||
for _, loc := range locs.Locations {
|
||||
if loc.SourceIndex < 0 || int(loc.SourceIndex) >= len(sourceMaps) {
|
||||
return nil, errors.Errorf("failed to find source map with index %d", loc.SourceIndex)
|
||||
}
|
||||
|
||||
srcs[digest.Digest(dgst)] = append(srcs[digest.Digest(dgst)], &SourceLocation{
|
||||
SourceMap: sourceMaps[int(loc.SourceIndex)],
|
||||
Ranges: loc.Ranges,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var index pb.OutputIndex
|
||||
if dgst != "" {
|
||||
index = ops[dgst].Inputs[0].Index
|
||||
|
@ -59,6 +92,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
|
|||
ops: ops,
|
||||
defs: defs,
|
||||
metas: def.Metadata,
|
||||
sources: srcs,
|
||||
platforms: platforms,
|
||||
dgst: dgst,
|
||||
index: index,
|
||||
|
@ -110,20 +144,20 @@ func (d *DefinitionOp) Validate(context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
|
||||
func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
|
||||
if d.dgst == "" {
|
||||
return "", nil, nil, errors.Errorf("cannot marshal empty definition op")
|
||||
return "", nil, nil, nil, errors.Errorf("cannot marshal empty definition op")
|
||||
}
|
||||
|
||||
if err := d.Validate(ctx); err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
meta := d.metas[d.dgst]
|
||||
return d.dgst, d.defs[d.dgst], &meta, nil
|
||||
return d.dgst, d.defs[d.dgst], &meta, d.sources[d.dgst], nil
|
||||
|
||||
}
|
||||
|
||||
|
|
32 vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored
|
@ -81,7 +81,7 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp
|
|||
}
|
||||
m.output = o
|
||||
}
|
||||
e.Store(nil, nil, nil)
|
||||
e.Store(nil, nil, nil, nil)
|
||||
e.isValidated = false
|
||||
return m.output
|
||||
}
|
||||
|
@ -124,12 +124,12 @@ func (e *ExecOp) Validate(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
|
||||
func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
|
||||
if e.Cached(c) {
|
||||
return e.Load()
|
||||
}
|
||||
if err := e.Validate(ctx); err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
// make sure mounts are sorted
|
||||
sort.Slice(e.mounts, func(i, j int) bool {
|
||||
|
@ -138,7 +138,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
|
||||
env, err := getEnv(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
if len(e.ssh) > 0 {
|
||||
|
@ -161,17 +161,17 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
|
||||
args, err := getArgs(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
cwd, err := getDir(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
user, err := getUser(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
meta := &pb.Meta{
|
||||
|
@ -182,7 +182,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
}
|
||||
extraHosts, err := getExtraHosts(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
if len(extraHosts) > 0 {
|
||||
hosts := make([]*pb.HostIP, len(extraHosts))
|
||||
|
@ -194,12 +194,12 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
|
||||
network, err := getNetwork(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
security, err := getSecurity(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
peo := &pb.ExecOp{
|
||||
|
@ -252,7 +252,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
if e.constraints.Platform == nil {
|
||||
p, err := getPlatform(e.base)(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
e.constraints.Platform = p
|
||||
}
|
||||
|
@ -267,11 +267,11 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
inputIndex := pb.InputIndex(len(pop.Inputs))
|
||||
if m.source != nil {
|
||||
if m.tmpfs {
|
||||
return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
|
||||
return "", nil, nil, nil, errors.Errorf("tmpfs mounts must use scratch")
|
||||
}
|
||||
inp, err := m.source.ToInput(ctx, c)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
newInput := true
|
||||
|
@ -356,9 +356,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
|
||||
dt, err := pop.Marshal()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
e.Store(dt, md, c)
|
||||
e.Store(dt, md, e.constraints.SourceLocations, c)
|
||||
return e.Load()
|
||||
}
|
||||
|
||||
|
@ -388,7 +388,7 @@ func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
|
|||
|
||||
i := 0
|
||||
for _, m2 := range e.mounts {
|
||||
if m2.noOutput || m2.readonly || m2.cacheID != "" {
|
||||
if m2.noOutput || m2.readonly || m2.tmpfs || m2.cacheID != "" {
|
||||
continue
|
||||
}
|
||||
if m == m2 {
|
||||
|
|
14 vendor/github.com/moby/buildkit/client/llb/fileop.go generated vendored
|
@ -649,12 +649,12 @@ func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, e
|
|||
return st, nil
|
||||
}
|
||||
|
||||
func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
|
||||
func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
|
||||
if f.Cached(c) {
|
||||
return f.Load()
|
||||
}
|
||||
if err := f.Validate(ctx); err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
addCap(&f.constraints, pb.CapFileBase)
|
||||
|
@ -669,7 +669,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
state := newMarshalState(ctx)
|
||||
_, err := state.add(f.action, c)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
pop.Inputs = state.inputs
|
||||
|
||||
|
@ -683,13 +683,13 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
if st.fa.state != nil {
|
||||
parent, err = st.fa.state.GetDir(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
action, err := st.action.toProtoAction(ctx, parent, st.base)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
pfo.Actions = append(pfo.Actions, &pb.FileAction{
|
||||
|
@ -702,9 +702,9 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
|
|||
|
||||
dt, err := pop.Marshal()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
f.Store(dt, md, c)
|
||||
f.Store(dt, md, f.constraints.SourceLocations, c)
|
||||
return f.Load()
|
||||
}
|
||||
|
||||
|
|
13
vendor/github.com/moby/buildkit/client/llb/marshal.go
generated
vendored
|
@ -14,21 +14,24 @@ import (
|
|||
type Definition struct {
|
||||
Def [][]byte
|
||||
Metadata map[digest.Digest]pb.OpMetadata
|
||||
Source *pb.Source
|
||||
}
|
||||
|
||||
func (def *Definition) ToPB() *pb.Definition {
|
||||
md := make(map[digest.Digest]pb.OpMetadata)
|
||||
md := make(map[digest.Digest]pb.OpMetadata, len(def.Metadata))
|
||||
for k, v := range def.Metadata {
|
||||
md[k] = v
|
||||
}
|
||||
return &pb.Definition{
|
||||
Def: def.Def,
|
||||
Source: def.Source,
|
||||
Metadata: md,
|
||||
}
|
||||
}
|
||||
|
||||
func (def *Definition) FromPB(x *pb.Definition) {
|
||||
def.Def = x.Def
|
||||
def.Source = x.Source
|
||||
def.Metadata = make(map[digest.Digest]pb.OpMetadata)
|
||||
for k, v := range x.Metadata {
|
||||
def.Metadata[k] = v
|
||||
|
@ -95,18 +98,20 @@ type MarshalCache struct {
|
|||
digest digest.Digest
|
||||
dt []byte
|
||||
md *pb.OpMetadata
|
||||
srcs []*SourceLocation
|
||||
constraints *Constraints
|
||||
}
|
||||
|
||||
func (mc *MarshalCache) Cached(c *Constraints) bool {
|
||||
return mc.dt != nil && mc.constraints == c
|
||||
}
|
||||
func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, error) {
|
||||
return mc.digest, mc.dt, mc.md, nil
|
||||
func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
|
||||
return mc.digest, mc.dt, mc.md, mc.srcs, nil
|
||||
}
|
||||
func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, c *Constraints) {
|
||||
func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, srcs []*SourceLocation, c *Constraints) {
|
||||
mc.digest = digest.FromBytes(dt)
|
||||
mc.dt = dt
|
||||
mc.md = md
|
||||
mc.constraints = c
|
||||
mc.srcs = srcs
|
||||
}
|
||||
|
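The MarshalCache changes above widen Load and Store so cached source locations travel with the digest, bytes, and metadata. A minimal hedged sketch, written as if inside the llb package, of a vertex-style Marshal method using the new API; the myOp type is hypothetical and the pb.Op it marshals is an empty stand-in.

// myOp is a hypothetical vertex; only the MarshalCache calls reflect the API above.
// assumes imports: "context", "github.com/moby/buildkit/solver/pb", digest "github.com/opencontainers/go-digest"
type myOp struct {
	cache       MarshalCache
	constraints Constraints
}

func (o *myOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
	if o.cache.Cached(c) {
		return o.cache.Load() // digest, bytes, metadata and source locations come back together
	}
	dt, err := (&pb.Op{}).Marshal() // stand-in for building the real proto op
	if err != nil {
		return "", nil, nil, nil, err
	}
	o.cache.Store(dt, &o.constraints.Metadata, o.constraints.SourceLocations, c)
	return o.cache.Load()
}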
|
8
vendor/github.com/moby/buildkit/client/llb/source.go
generated
vendored
|
@ -44,12 +44,12 @@ func (s *SourceOp) Validate(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
|
||||
func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
|
||||
if s.Cached(constraints) {
|
||||
return s.Load()
|
||||
}
|
||||
if err := s.Validate(ctx); err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s.id, "local://") {
|
||||
|
@ -74,10 +74,10 @@ func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (diges
|
|||
|
||||
dt, err := proto.Marshal()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
|
||||
s.Store(dt, md, constraints)
|
||||
s.Store(dt, md, s.constraints.SourceLocations, constraints)
|
||||
return s.Load()
|
||||
}
|
||||
|
||||
|
|
111
vendor/github.com/moby/buildkit/client/llb/sourcemap.go
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
|||
package llb
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type SourceMap struct {
|
||||
State *State
|
||||
Definition *Definition
|
||||
Filename string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func NewSourceMap(st *State, filename string, dt []byte) *SourceMap {
|
||||
return &SourceMap{
|
||||
State: st,
|
||||
Filename: filename,
|
||||
Data: dt,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SourceMap) Location(r []*pb.Range) ConstraintsOpt {
|
||||
return constraintsOptFunc(func(c *Constraints) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
c.SourceLocations = append(c.SourceLocations, &SourceLocation{
|
||||
SourceMap: s,
|
||||
Ranges: r,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type SourceLocation struct {
|
||||
SourceMap *SourceMap
|
||||
Ranges []*pb.Range
|
||||
}
|
||||
|
||||
type sourceMapCollector struct {
|
||||
maps []*SourceMap
|
||||
index map[*SourceMap]int
|
||||
locations map[digest.Digest][]*SourceLocation
|
||||
}
|
||||
|
||||
func newSourceMapCollector() *sourceMapCollector {
|
||||
return &sourceMapCollector{
|
||||
index: map[*SourceMap]int{},
|
||||
locations: map[digest.Digest][]*SourceLocation{},
|
||||
}
|
||||
}
|
||||
|
||||
func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
|
||||
for _, l := range ls {
|
||||
idx, ok := smc.index[l.SourceMap]
|
||||
if !ok {
|
||||
idx = len(smc.maps)
|
||||
smc.maps = append(smc.maps, l.SourceMap)
|
||||
}
|
||||
smc.index[l.SourceMap] = idx
|
||||
}
|
||||
smc.locations[dgst] = ls
|
||||
}
|
||||
|
||||
func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {
|
||||
s := &pb.Source{
|
||||
Locations: make(map[string]*pb.Locations),
|
||||
}
|
||||
for _, m := range smc.maps {
|
||||
def := m.Definition
|
||||
if def == nil && m.State != nil {
|
||||
var err error
|
||||
def, err = m.State.Marshal(ctx, co...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.Definition = def
|
||||
}
|
||||
|
||||
info := &pb.SourceInfo{
|
||||
Data: m.Data,
|
||||
Filename: m.Filename,
|
||||
}
|
||||
|
||||
if def != nil {
|
||||
info.Definition = def.ToPB()
|
||||
}
|
||||
|
||||
s.Infos = append(s.Infos, info)
|
||||
}
|
||||
|
||||
for dgst, locs := range smc.locations {
|
||||
pbLocs, ok := s.Locations[dgst.String()]
|
||||
if !ok {
|
||||
pbLocs = &pb.Locations{}
|
||||
}
|
||||
|
||||
for _, loc := range locs {
|
||||
pbLocs.Locations = append(pbLocs.Locations, &pb.Location{
|
||||
SourceIndex: int32(smc.index[loc.SourceMap]),
|
||||
Ranges: loc.Ranges,
|
||||
})
|
||||
}
|
||||
|
||||
s.Locations[dgst.String()] = pbLocs
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
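The new sourcemap.go above is the public surface for attaching Dockerfile positions to LLB ops. A hedged client-side sketch follows; the Dockerfile bytes and line number are invented, and sm.Location is used as a RunOption, which the convert.go changes below also rely on.

// assumes imports: "github.com/moby/buildkit/client/llb", "github.com/moby/buildkit/solver/pb"
dt := []byte("FROM alpine\nRUN make\n")
sm := llb.NewSourceMap(nil, "Dockerfile", dt)

st := llb.Image("alpine").Run(
	llb.Args([]string{"/bin/sh", "-c", "make"}),
	sm.Location([]*pb.Range{{
		Start: pb.Position{Line: 2},
		End:   pb.Position{Line: 2},
	}}),
).Root()
_ = st // marshaling st will record the RUN op's location in Definition.Source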
21
vendor/github.com/moby/buildkit/client/llb/state.go
generated
vendored
|
@ -24,7 +24,7 @@ type Output interface {
|
|||
|
||||
type Vertex interface {
|
||||
Validate(context.Context) error
|
||||
Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error)
|
||||
Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error)
|
||||
Output() Output
|
||||
Inputs() []Output
|
||||
}
|
||||
|
@ -124,7 +124,9 @@ func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition,
|
|||
o.SetConstraintsOption(c)
|
||||
}
|
||||
|
||||
def, err := marshal(ctx, s.Output().Vertex(ctx), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c)
|
||||
smc := newSourceMapCollector()
|
||||
|
||||
def, err := marshal(ctx, s.Output().Vertex(ctx), def, smc, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c)
|
||||
if err != nil {
|
||||
return def, err
|
||||
}
|
||||
|
@ -159,23 +161,28 @@ func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition,
|
|||
}
|
||||
|
||||
def.Metadata[dgst] = md
|
||||
sm, err := smc.Marshal(ctx, co...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
def.Source = sm
|
||||
|
||||
return def, nil
|
||||
}
|
||||
|
||||
func marshal(ctx context.Context, v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) {
|
||||
func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollector, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) {
|
||||
if _, ok := vertexCache[v]; ok {
|
||||
return def, nil
|
||||
}
|
||||
for _, inp := range v.Inputs() {
|
||||
var err error
|
||||
def, err = marshal(ctx, inp.Vertex(ctx), def, cache, vertexCache, c)
|
||||
def, err = marshal(ctx, inp.Vertex(ctx), def, s, cache, vertexCache, c)
|
||||
if err != nil {
|
||||
return def, err
|
||||
}
|
||||
}
|
||||
|
||||
dgst, dt, opMeta, err := v.Marshal(ctx, c)
|
||||
dgst, dt, opMeta, sls, err := v.Marshal(ctx, c)
|
||||
if err != nil {
|
||||
return def, err
|
||||
}
|
||||
|
@ -186,6 +193,7 @@ func marshal(ctx context.Context, v Vertex, def *Definition, cache map[digest.Di
|
|||
if _, ok := cache[dgst]; ok {
|
||||
return def, nil
|
||||
}
|
||||
s.Add(dgst, sls)
|
||||
def.Def = append(def.Def, dt)
|
||||
cache[dgst] = struct{}{}
|
||||
return def, nil
|
||||
|
@ -367,7 +375,7 @@ func (o *output) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error)
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
dgst, _, _, err := o.vertex.Marshal(ctx, c)
|
||||
dgst, _, _, _, err := o.vertex.Marshal(ctx, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -514,6 +522,7 @@ type Constraints struct {
|
|||
Metadata pb.OpMetadata
|
||||
LocalUniqueID string
|
||||
Caps *apicaps.CapSet
|
||||
SourceLocations []*SourceLocation
|
||||
}
|
||||
|
||||
func Platform(p specs.Platform) ConstraintsOpt {
|
||||
|
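With the state.go changes above, a successful State.Marshal now also fills Definition.Source. A hedged sketch of reading that mapping back out of the protobuf form; st is assumed to be an llb.State already in scope, and field names follow the pb.Source usage in sourcemap.go above.

// assumes imports: "context", "fmt"
def, err := st.Marshal(context.TODO())
if err != nil {
	return err
}
for dgst, locs := range def.ToPB().Source.Locations {
	for _, l := range locs.Locations {
		fmt.Printf("op %s -> source index %d, %d range(s)\n", dgst, l.SourceIndex, len(l.Ranges))
	}
}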
|
3
vendor/github.com/moby/buildkit/executor/oci/hosts.go
generated
vendored
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const hostsContent = `
|
||||
|
@ -41,7 +42,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools
|
|||
if err == nil {
|
||||
return "", func() {}, nil
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/moby/buildkit/executor/oci/mounts.go
generated
vendored
|
@ -68,7 +68,7 @@ func withROBind(src, dest string) func(m []specs.Mount) ([]specs.Mount, error) {
|
|||
Destination: dest,
|
||||
Type: "bind",
|
||||
Source: src,
|
||||
Options: []string{"rbind", "ro"},
|
||||
Options: []string{"nosuid", "noexec", "nodev", "rbind", "ro"},
|
||||
})
|
||||
return m, nil
|
||||
}
|
||||
|
|
7
vendor/github.com/moby/buildkit/executor/oci/resolvconf.go
generated
vendored
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/docker/libnetwork/resolvconf"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var g flightcontrol.Group
|
||||
|
@ -34,7 +35,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
|
|||
if !generate {
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return "", err
|
||||
}
|
||||
generate = true
|
||||
|
@ -42,7 +43,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
|
|||
if !generate {
|
||||
fiMain, err := os.Stat(resolvconf.Path())
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
if lastNotEmpty {
|
||||
|
@ -64,7 +65,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
|
|||
var dt []byte
|
||||
f, err := resolvconfGet()
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
|
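The os.IsNotExist to errors.Is switches in hosts.go and resolvconf.go above share one motivation: errors.Is also matches wrapped errors, while os.IsNotExist only inspects the concrete value. A minimal illustration under that assumption:

// assumes imports: "errors", "os", pkgerrors "github.com/pkg/errors"
_, statErr := os.Stat("/nonexistent/resolv.conf")
wrapped := pkgerrors.Wrap(statErr, "reading resolv.conf")

if statErr != nil && errors.Is(wrapped, os.ErrNotExist) {
	// still detected through the wrapper; os.IsNotExist(wrapped) would report false here
}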
|
4
vendor/github.com/moby/buildkit/executor/oci/user.go
generated
vendored
|
@ -2,7 +2,6 @@ package oci
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -11,7 +10,8 @@ import (
|
|||
containerdoci "github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func GetUser(ctx context.Context, root, username string) (uint32, uint32, []uint32, error) {
|
||||
|
|
3
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
generated
vendored
|
@ -23,6 +23,7 @@ import (
|
|||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/network"
|
||||
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -302,7 +303,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
|
|||
case <-ctx.Done():
|
||||
return errors.Wrapf(ctx.Err(), err.Error())
|
||||
default:
|
||||
return err
|
||||
return stack.Enable(err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
51
vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
generated
vendored
|
@ -19,8 +19,10 @@ import (
|
|||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||
"github.com/moby/buildkit/frontend/gateway/client"
|
||||
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/apicaps"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@ -226,6 +228,8 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
return nil, errors.Wrapf(err, "failed to marshal local source")
|
||||
}
|
||||
|
||||
var sourceMap *llb.SourceMap
|
||||
|
||||
eg, ctx2 := errgroup.WithContext(ctx)
|
||||
var dtDockerfile []byte
|
||||
var dtDockerignore []byte
|
||||
|
@ -250,6 +254,9 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
return errors.Wrapf(err, "failed to read dockerfile")
|
||||
}
|
||||
|
||||
sourceMap = llb.NewSourceMap(&src, filename, dtDockerfile)
|
||||
sourceMap.Definition = def
|
||||
|
||||
dt, err := ref.ReadFile(ctx2, client.ReadRequest{
|
||||
Filename: filename + ".dockerignore",
|
||||
})
|
||||
|
@ -310,9 +317,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
}
|
||||
|
||||
if _, ok := opts["cmdline"]; !ok {
|
||||
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
|
||||
ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
|
||||
if ok {
|
||||
return forwardGateway(ctx, c, ref, cmdline)
|
||||
res, err := forwardGateway(ctx, c, ref, cmdline)
|
||||
if err != nil && len(errdefs.Sources(err)) == 0 {
|
||||
return nil, wrapSource(err, sourceMap, loc)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -338,7 +349,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
|
||||
for i, tp := range targetPlatforms {
|
||||
func(i int, tp *specs.Platform) {
|
||||
eg.Go(func() error {
|
||||
eg.Go(func() (err error) {
|
||||
defer func() {
|
||||
var el *parser.ErrorLocation
|
||||
if errors.As(err, &el) {
|
||||
err = wrapSource(err, sourceMap, el.Location)
|
||||
}
|
||||
}()
|
||||
st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
|
||||
Target: opts[keyTarget],
|
||||
MetaResolver: c,
|
||||
|
@ -357,6 +374,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
ForceNetMode: defaultNetMode,
|
||||
OverrideCopyImage: opts[keyOverrideCopyImage],
|
||||
LLBCaps: &caps,
|
||||
SourceMap: sourceMap,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
|
@ -639,3 +657,30 @@ func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State {
|
|||
bc := unpack.AddMount("/out", llb.Scratch())
|
||||
return &bc
|
||||
}
|
||||
|
||||
func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error {
|
||||
if sm == nil {
|
||||
return err
|
||||
}
|
||||
s := errdefs.Source{
|
||||
Info: &pb.SourceInfo{
|
||||
Data: sm.Data,
|
||||
Filename: sm.Filename,
|
||||
Definition: sm.Definition.ToPB(),
|
||||
},
|
||||
Ranges: make([]*pb.Range, 0, len(ranges)),
|
||||
}
|
||||
for _, r := range ranges {
|
||||
s.Ranges = append(s.Ranges, &pb.Range{
|
||||
Start: pb.Position{
|
||||
Line: int32(r.Start.Line),
|
||||
Character: int32(r.Start.Character),
|
||||
},
|
||||
End: pb.Position{
|
||||
Line: int32(r.End.Line),
|
||||
Character: int32(r.End.Character),
|
||||
},
|
||||
})
|
||||
}
|
||||
return errdefs.WithSource(err, s)
|
||||
}
|
||||
|
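A hedged sketch of what a caller of Build could do with the source information attached by wrapSource above; errdefs.Sources is only relied on to return a slice, matching the len check used earlier in this file.

// assumes imports: "fmt", "github.com/moby/buildkit/solver/errdefs"
res, err := Build(ctx, c) // ctx and c assumed in scope
if err != nil {
	if srcs := errdefs.Sources(err); len(srcs) > 0 {
		// each entry pairs the Dockerfile bytes with the ranges that produced the error
		fmt.Printf("build failed with %d source location(s) attached\n", len(srcs))
	}
}
_ = res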
|
99
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
@ -60,6 +61,7 @@ type ConvertOpt struct {
|
|||
OverrideCopyImage string
|
||||
LLBCaps *apicaps.CapSet
|
||||
ContextLocalName string
|
||||
SourceMap *llb.SourceMap
|
||||
}
|
||||
|
||||
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) {
|
||||
|
@ -110,10 +112,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
for i, st := range stages {
|
||||
name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, st.Location)
|
||||
}
|
||||
if name == "" {
|
||||
return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName)
|
||||
return nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location)
|
||||
}
|
||||
st.BaseName = name
|
||||
|
||||
|
@ -132,12 +134,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
if v := st.Platform; v != "" {
|
||||
v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
|
||||
return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location)
|
||||
}
|
||||
|
||||
p, err := platforms.Parse(v)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v)
|
||||
return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location)
|
||||
}
|
||||
ds.platform = &p
|
||||
}
|
||||
|
@ -204,7 +206,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
}
|
||||
|
||||
if has, state := hasCircularDependency(allDispatchStates.states); has {
|
||||
return nil, nil, fmt.Errorf("circular dependency detected on stage: %s", state.stageName)
|
||||
return nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName)
|
||||
}
|
||||
|
||||
if len(allDispatchStates.states) == 1 {
|
||||
|
@ -225,7 +227,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
eg.Go(func() error {
|
||||
ref, err := reference.ParseNormalizedNamed(d.stage.BaseName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName)
|
||||
return parser.WithLocation(errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName), d.stage.Location)
|
||||
}
|
||||
platform := d.platform
|
||||
if platform == nil {
|
||||
|
@ -278,7 +280,13 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
if isScratch {
|
||||
d.state = llb.Scratch()
|
||||
} else {
|
||||
d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)))
|
||||
d.state = llb.Image(d.stage.BaseName,
|
||||
dfCmd(d.stage.SourceCode),
|
||||
llb.Platform(*platform),
|
||||
opt.ImageResolveMode,
|
||||
llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)),
|
||||
location(opt.SourceMap, d.stage.Location),
|
||||
)
|
||||
}
|
||||
d.platform = platform
|
||||
return nil
|
||||
|
@ -316,12 +324,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
}
|
||||
if d.image.Config.WorkingDir != "" {
|
||||
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, d.stage.Location)
|
||||
}
|
||||
}
|
||||
if d.image.Config.User != "" {
|
||||
if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, d.stage.Location)
|
||||
}
|
||||
}
|
||||
d.state = d.state.Network(opt.ForceNetMode)
|
||||
|
@ -340,19 +348,20 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
extraHosts: opt.ExtraHosts,
|
||||
copyImage: opt.OverrideCopyImage,
|
||||
llbCaps: opt.LLBCaps,
|
||||
sourceMap: opt.SourceMap,
|
||||
}
|
||||
if opt.copyImage == "" {
|
||||
opt.copyImage = DefaultCopyImage
|
||||
}
|
||||
|
||||
if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, d.stage.Location)
|
||||
}
|
||||
d.image.Config.OnBuild = nil
|
||||
|
||||
for _, cmd := range d.commands {
|
||||
if err := dispatch(d, cmd, opt); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, cmd.Location())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -421,7 +430,7 @@ func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (comm
|
|||
stn, ok = allDispatchStates.findStateByName(c.From)
|
||||
if !ok {
|
||||
stn = &dispatchState{
|
||||
stage: instructions.Stage{BaseName: c.From},
|
||||
stage: instructions.Stage{BaseName: c.From, Location: ic.Location()},
|
||||
deps: make(map[*dispatchState]struct{}),
|
||||
unregistered: true,
|
||||
}
|
||||
|
@ -457,6 +466,7 @@ type dispatchOpt struct {
|
|||
extraHosts []llb.HostIP
|
||||
copyImage string
|
||||
llbCaps *apicaps.CapSet
|
||||
sourceMap *llb.SourceMap
|
||||
}
|
||||
|
||||
func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
|
||||
|
@ -484,7 +494,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
|
|||
case *instructions.WorkdirCommand:
|
||||
err = dispatchWorkdir(d, c, true, &opt)
|
||||
case *instructions.AddCommand:
|
||||
err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, opt)
|
||||
err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, c.Chmod, c.Location(), opt)
|
||||
if err == nil {
|
||||
for _, src := range c.Sources() {
|
||||
if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") {
|
||||
|
@ -519,7 +529,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
|
|||
if len(cmd.sources) != 0 {
|
||||
l = cmd.sources[0].state
|
||||
}
|
||||
err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, opt)
|
||||
err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, c.Chmod, c.Location(), opt)
|
||||
if err == nil && len(cmd.sources) == 0 {
|
||||
for _, src := range c.Sources() {
|
||||
d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
|
||||
|
@ -634,7 +644,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt := []llb.RunOption{llb.Args(args), dfCmd(c)}
|
||||
opt := []llb.RunOption{llb.Args(args), dfCmd(c), location(dopt.sourceMap, c.Location())}
|
||||
if d.ignoreCache {
|
||||
opt = append(opt, llb.IgnoreCache)
|
||||
}
|
||||
|
@ -702,7 +712,10 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform)))
|
||||
d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...),
|
||||
llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform)),
|
||||
location(opt.sourceMap, c.Location()),
|
||||
)
|
||||
withLayer = true
|
||||
}
|
||||
return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil)
|
||||
|
@ -710,7 +723,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
|
|||
return nil
|
||||
}
|
||||
|
||||
func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
|
||||
func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, chmod string, loc []parser.Range, opt dispatchOpt) error {
|
||||
pp, err := pathRelativeToWorkingDir(d.state, c.Dest())
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -726,6 +739,15 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
|
|||
copyOpt = append(copyOpt, llb.WithUser(chown))
|
||||
}
|
||||
|
||||
var mode *os.FileMode
|
||||
if chmod != "" {
|
||||
p, err := strconv.ParseUint(chmod, 8, 32)
|
||||
if err == nil {
|
||||
perm := os.FileMode(p)
|
||||
mode = &perm
|
||||
}
|
||||
}
|
||||
|
||||
commitMessage := bytes.NewBufferString("")
|
||||
if isAddCommand {
|
||||
commitMessage.WriteString("ADD")
|
||||
|
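For context on the --chmod flag threaded through dispatchCopyFileOp above, a small hypothetical helper mirroring that handling: the value is parsed as base-8, so --chmod=440 yields 0440 (r--r-----), and invalid values are silently ignored, matching the hunk above.

// assumes imports: "os", "strconv"
func parseChmod(chmod string) *os.FileMode {
	if chmod == "" {
		return nil
	}
	p, err := strconv.ParseUint(chmod, 8, 32) // octal, e.g. "440" -> 0440
	if err != nil {
		return nil // invalid values fall through without a mode, as above
	}
	perm := os.FileMode(p)
	return &perm
}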
@ -768,6 +790,7 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
|
|||
}
|
||||
} else {
|
||||
opts := append([]llb.CopyOption{&llb.CopyInfo{
|
||||
Mode: mode,
|
||||
FollowSymlinks: true,
|
||||
CopyDirContentsOnly: true,
|
||||
AttemptUnpack: isAddCommand,
|
||||
|
@ -796,7 +819,10 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
|
|||
return err
|
||||
}
|
||||
|
||||
fileOpt := []llb.ConstraintsOpt{llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform))}
|
||||
fileOpt := []llb.ConstraintsOpt{
|
||||
llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform)),
|
||||
location(opt.sourceMap, loc),
|
||||
}
|
||||
if d.ignoreCache {
|
||||
fileOpt = append(fileOpt, llb.IgnoreCache)
|
||||
}
|
||||
|
@ -805,9 +831,16 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
|
|||
return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
|
||||
}
|
||||
|
||||
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
|
||||
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, chmod string, loc []parser.Range, opt dispatchOpt) error {
|
||||
if useFileOp(opt.buildArgValues, opt.llbCaps) {
|
||||
return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, opt)
|
||||
return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, chmod, loc, opt)
|
||||
}
|
||||
|
||||
if chmod != "" {
|
||||
if opt.llbCaps != nil && opt.llbCaps.Supports(pb.CapFileBase) != nil {
|
||||
return errors.Wrap(opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported")
|
||||
}
|
||||
return errors.New("chmod is not supported")
|
||||
}
|
||||
|
||||
img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
|
||||
|
@ -893,7 +926,14 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
|
|||
return err
|
||||
}
|
||||
|
||||
runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform))}
|
||||
runOpt := []llb.RunOption{
|
||||
llb.Args(args),
|
||||
llb.Dir("/dest"),
|
||||
llb.ReadonlyRootFS(),
|
||||
dfCmd(cmdToPrint),
|
||||
llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform)),
|
||||
location(opt.sourceMap, loc),
|
||||
}
|
||||
if d.ignoreCache {
|
||||
runOpt = append(runOpt, llb.IgnoreCache)
|
||||
}
|
||||
|
@ -1361,3 +1401,20 @@ func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
|
|||
}
|
||||
return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
|
||||
}
|
||||
|
||||
func location(sm *llb.SourceMap, locations []parser.Range) llb.ConstraintsOpt {
|
||||
loc := make([]*pb.Range, 0, len(locations))
|
||||
for _, l := range locations {
|
||||
loc = append(loc, &pb.Range{
|
||||
Start: pb.Position{
|
||||
Line: int32(l.Start.Line),
|
||||
Character: int32(l.Start.Character),
|
||||
},
|
||||
End: pb.Position{
|
||||
Line: int32(l.End.Line),
|
||||
Character: int32(l.End.Character),
|
||||
},
|
||||
})
|
||||
}
|
||||
return sm.Location(loc)
|
||||
}
|
||||
|
|
33
vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go
generated
vendored
|
@ -5,34 +5,51 @@ import (
|
|||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||
)
|
||||
|
||||
const keySyntax = "syntax"
|
||||
|
||||
var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
|
||||
|
||||
func DetectSyntax(r io.Reader) (string, string, bool) {
|
||||
type Directive struct {
|
||||
Name string
|
||||
Value string
|
||||
Location []parser.Range
|
||||
}
|
||||
|
||||
func DetectSyntax(r io.Reader) (string, string, []parser.Range, bool) {
|
||||
directives := ParseDirectives(r)
|
||||
if len(directives) == 0 {
|
||||
return "", "", false
|
||||
return "", "", nil, false
|
||||
}
|
||||
v, ok := directives[keySyntax]
|
||||
if !ok {
|
||||
return "", "", false
|
||||
return "", "", nil, false
|
||||
}
|
||||
p := strings.SplitN(v, " ", 2)
|
||||
return p[0], v, true
|
||||
p := strings.SplitN(v.Value, " ", 2)
|
||||
return p[0], v.Value, v.Location, true
|
||||
}
|
||||
|
||||
func ParseDirectives(r io.Reader) map[string]string {
|
||||
m := map[string]string{}
|
||||
func ParseDirectives(r io.Reader) map[string]Directive {
|
||||
m := map[string]Directive{}
|
||||
s := bufio.NewScanner(r)
|
||||
var l int
|
||||
for s.Scan() {
|
||||
l++
|
||||
match := reDirective.FindStringSubmatch(s.Text())
|
||||
if len(match) == 0 {
|
||||
return m
|
||||
}
|
||||
m[strings.ToLower(match[1])] = match[2]
|
||||
m[strings.ToLower(match[1])] = Directive{
|
||||
Name: match[1],
|
||||
Value: match[2],
|
||||
Location: []parser.Range{{
|
||||
Start: parser.Position{Line: l},
|
||||
End: parser.Position{Line: l},
|
||||
}},
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
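A hedged sketch of the reworked directive API above: ParseDirectives now returns typed Directive values that carry their location, and DetectSyntax reports where the # syntax line was found. The Dockerfile content is invented.

// assumes imports: "bytes", "fmt", "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
df := []byte("# syntax=docker/dockerfile:experimental\nFROM busybox\n")
ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(df))
if ok {
	fmt.Printf("frontend %s (cmdline %q) declared at line %d\n", ref, cmdline, loc[0].Start.Line)
}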
|
26
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
generated
vendored
|
@ -1,11 +1,12 @@
|
|||
package instructions
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering)
|
||||
|
@ -35,6 +36,7 @@ func (kvpo *KeyValuePairOptional) ValueString() string {
|
|||
// Command is implemented by every command present in a dockerfile
|
||||
type Command interface {
|
||||
Name() string
|
||||
Location() []parser.Range
|
||||
}
|
||||
|
||||
// KeyValuePairs is a slice of KeyValuePair
|
||||
|
@ -42,8 +44,9 @@ type KeyValuePairs []KeyValuePair
|
|||
|
||||
// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code)
|
||||
type withNameAndCode struct {
|
||||
code string
|
||||
name string
|
||||
code string
|
||||
name string
|
||||
location []parser.Range
|
||||
}
|
||||
|
||||
func (c *withNameAndCode) String() string {
|
||||
|
@ -55,8 +58,13 @@ func (c *withNameAndCode) Name() string {
|
|||
return c.name
|
||||
}
|
||||
|
||||
// Location of the command in source
|
||||
func (c *withNameAndCode) Location() []parser.Range {
|
||||
return c.location
|
||||
}
|
||||
|
||||
func newWithNameAndCode(req parseRequest) withNameAndCode {
|
||||
return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command}
|
||||
return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command, location: req.location}
|
||||
}
|
||||
|
||||
// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
|
||||
|
@ -180,10 +188,16 @@ type AddCommand struct {
|
|||
withNameAndCode
|
||||
SourcesAndDest
|
||||
Chown string
|
||||
Chmod string
|
||||
}
|
||||
|
||||
// Expand variables
|
||||
func (c *AddCommand) Expand(expander SingleWordExpander) error {
|
||||
expandedChown, err := expander(c.Chown)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Chown = expandedChown
|
||||
return expandSliceInPlace(c.SourcesAndDest, expander)
|
||||
}
|
||||
|
||||
|
@ -196,6 +210,7 @@ type CopyCommand struct {
|
|||
SourcesAndDest
|
||||
From string
|
||||
Chown string
|
||||
Chmod string
|
||||
}
|
||||
|
||||
// Expand variables
|
||||
|
@ -400,6 +415,7 @@ type Stage struct {
|
|||
BaseName string
|
||||
SourceCode string
|
||||
Platform string
|
||||
Location []parser.Range
|
||||
}
|
||||
|
||||
// AddCommand to the stage
|
||||
|
@ -419,7 +435,7 @@ func IsCurrentStage(s []Stage, name string) bool {
|
|||
// CurrentStage return the last stage in a slice
|
||||
func CurrentStage(s []Stage) (*Stage, error) {
|
||||
if len(s) == 0 {
|
||||
return nil, errors.New("No build stage in current context")
|
||||
return nil, errors.New("no build stage in current context")
|
||||
}
|
||||
return &s[len(s)-1], nil
|
||||
}
|
||||
|
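With withNameAndCode and Stage now recording locations above, parsed instructions can report where they came from. A hedged end-to-end sketch; the Dockerfile content is invented.

// assumes imports: "fmt", "strings",
//   "github.com/moby/buildkit/frontend/dockerfile/instructions",
//   "github.com/moby/buildkit/frontend/dockerfile/parser"
res, err := parser.Parse(strings.NewReader("FROM alpine\nRUN make\n"))
if err != nil {
	return err
}
stages, _, err := instructions.Parse(res.AST)
if err != nil {
	return err
}
for _, st := range stages {
	fmt.Printf("stage %q starts at line %d\n", st.BaseName, st.Location[0].Start.Line)
	for _, cmd := range st.Commands {
		fmt.Printf("  %s at line %d\n", cmd.Name(), cmd.Location()[0].Start.Line)
	}
}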
|
36
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go
generated
vendored
|
@ -21,6 +21,7 @@ type parseRequest struct {
|
|||
attributes map[string]bool
|
||||
flags *BFlags
|
||||
original string
|
||||
location []parser.Range
|
||||
}
|
||||
|
||||
var parseRunPreHooks []func(*RunCommand, parseRequest) error
|
||||
|
@ -48,11 +49,15 @@ func newParseRequestFromNode(node *parser.Node) parseRequest {
|
|||
attributes: node.Attributes,
|
||||
original: node.Original,
|
||||
flags: NewBFlagsWithArgs(node.Flags),
|
||||
location: node.Location(),
|
||||
}
|
||||
}
|
||||
|
||||
// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
|
||||
func ParseInstruction(node *parser.Node) (interface{}, error) {
|
||||
func ParseInstruction(node *parser.Node) (v interface{}, err error) {
|
||||
defer func() {
|
||||
err = parser.WithLocation(err, node.Location())
|
||||
}()
|
||||
req := newParseRequestFromNode(node)
|
||||
switch node.Value {
|
||||
case command.Env:
|
||||
|
@ -105,7 +110,7 @@ func ParseCommand(node *parser.Node) (Command, error) {
|
|||
if c, ok := s.(Command); ok {
|
||||
return c, nil
|
||||
}
|
||||
return nil, errors.Errorf("%T is not a command type", s)
|
||||
return nil, parser.WithLocation(errors.Errorf("%T is not a command type", s), node.Location())
|
||||
}
|
||||
|
||||
// UnknownInstruction represents an error occurring when a command is unresolvable
|
||||
|
@ -118,25 +123,17 @@ func (e *UnknownInstruction) Error() string {
|
|||
return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction))
|
||||
}
|
||||
|
||||
// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction
|
||||
func IsUnknownInstruction(err error) bool {
|
||||
_, ok := err.(*UnknownInstruction)
|
||||
if !ok {
|
||||
var pe *parseError
|
||||
if pe, ok = err.(*parseError); ok {
|
||||
_, ok = pe.inner.(*UnknownInstruction)
|
||||
}
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
type parseError struct {
|
||||
inner error
|
||||
node *parser.Node
|
||||
}
|
||||
|
||||
func (e *parseError) Error() string {
|
||||
return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
|
||||
return fmt.Sprintf("dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
|
||||
}
|
||||
|
||||
func (e *parseError) Unwrap() error {
|
||||
return e.inner
|
||||
}
|
||||
|
||||
// Parse a Dockerfile into a collection of buildable stages.
|
||||
|
@ -160,11 +157,11 @@ func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error)
|
|||
case Command:
|
||||
stage, err := CurrentStage(stages)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, parser.WithLocation(err, n.Location())
|
||||
}
|
||||
stage.AddCommand(c)
|
||||
default:
|
||||
return nil, nil, errors.Errorf("%T is not a command type", cmd)
|
||||
return nil, nil, parser.WithLocation(errors.Errorf("%T is not a command type", cmd), n.Location())
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -242,6 +239,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) {
|
|||
return nil, errNoDestinationArgument("ADD")
|
||||
}
|
||||
flChown := req.flags.AddString("chown", "")
|
||||
flChmod := req.flags.AddString("chmod", "")
|
||||
if err := req.flags.Parse(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -249,6 +247,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) {
|
|||
SourcesAndDest: SourcesAndDest(req.args),
|
||||
withNameAndCode: newWithNameAndCode(req),
|
||||
Chown: flChown.Value,
|
||||
Chmod: flChmod.Value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -258,6 +257,7 @@ func parseCopy(req parseRequest) (*CopyCommand, error) {
|
|||
}
|
||||
flChown := req.flags.AddString("chown", "")
|
||||
flFrom := req.flags.AddString("from", "")
|
||||
flChmod := req.flags.AddString("chmod", "")
|
||||
if err := req.flags.Parse(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -266,6 +266,7 @@ func parseCopy(req parseRequest) (*CopyCommand, error) {
|
|||
From: flFrom.Value,
|
||||
withNameAndCode: newWithNameAndCode(req),
|
||||
Chown: flChown.Value,
|
||||
Chmod: flChmod.Value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -287,6 +288,7 @@ func parseFrom(req parseRequest) (*Stage, error) {
|
|||
SourceCode: code,
|
||||
Commands: []Command{},
|
||||
Platform: flPlatform.Value,
|
||||
Location: req.location,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
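IsUnknownInstruction is removed above in favor of standard unwrapping: parseError now implements Unwrap, so callers can reach the inner *UnknownInstruction with errors.As. A minimal hedged sketch; ast is assumed to be a previously parsed *parser.Node.

// assumes imports: "errors", "fmt", "github.com/moby/buildkit/frontend/dockerfile/instructions"
_, _, err := instructions.Parse(ast)
var ui *instructions.UnknownInstruction
if errors.As(err, &ui) {
	fmt.Printf("unsupported instruction: %s\n", ui.Instruction)
}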
|
58
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrorLocation gives a location in source code that caused the error
|
||||
type ErrorLocation struct {
|
||||
Location []Range
|
||||
error
|
||||
}
|
||||
|
||||
// Unwrap unwraps to the next error
|
||||
func (e *ErrorLocation) Unwrap() error {
|
||||
return e.error
|
||||
}
|
||||
|
||||
// Range is a code section between two positions
|
||||
type Range struct {
|
||||
Start Position
|
||||
End Position
|
||||
}
|
||||
|
||||
// Position is a point in source code
|
||||
type Position struct {
|
||||
Line int
|
||||
Character int
|
||||
}
|
||||
|
||||
func withLocation(err error, start, end int) error {
|
||||
return WithLocation(err, toRanges(start, end))
|
||||
}
|
||||
|
||||
// WithLocation extends an error with a source code location
|
||||
func WithLocation(err error, location []Range) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
var el *ErrorLocation
|
||||
if errors.As(err, &el) {
|
||||
return err
|
||||
}
|
||||
return stack.Enable(&ErrorLocation{
|
||||
error: err,
|
||||
Location: location,
|
||||
})
|
||||
}
|
||||
|
||||
func toRanges(start, end int) (r []Range) {
|
||||
if end <= start {
|
||||
end = start
|
||||
}
|
||||
for i := start; i <= end; i++ {
|
||||
r = append(r, Range{Start: Position{Line: i}, End: Position{Line: i}})
|
||||
}
|
||||
return
|
||||
}
|
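The new errors.go above gives the parser a uniform way to carry positions. A hedged sketch of wrapping an error with a range and recovering it later; the message and line number are invented.

// assumes imports: "errors", "fmt", "github.com/moby/buildkit/frontend/dockerfile/parser"
err := parser.WithLocation(errors.New("unexpected token"), []parser.Range{{
	Start: parser.Position{Line: 7},
	End:   parser.Position{Line: 7},
}})

var el *parser.ErrorLocation
if errors.As(err, &el) {
	fmt.Printf("line %d: %v\n", el.Location[0].Start.Line, el.Unwrap())
}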
33
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
generated
vendored
|
@ -8,11 +8,12 @@ package parser
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -25,7 +26,7 @@ const (
|
|||
|
||||
// ignore the current argument. This will still leave a command parsed, but
|
||||
// will not incorporate the arguments into the ast.
|
||||
func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
return &Node{}, nil, nil
|
||||
}
|
||||
|
||||
|
@ -34,7 +35,7 @@ func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
|
|||
//
|
||||
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
|
||||
//
|
||||
func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
if rest == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
@ -50,7 +51,7 @@ func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error)
|
|||
// helper to parse words (i.e space delimited or quoted strings) in a statement.
|
||||
// The quotes are preserved as part of this function and they are stripped later
|
||||
// as part of processWords().
|
||||
func parseWords(rest string, d *Directive) []string {
|
||||
func parseWords(rest string, d *directives) []string {
|
||||
const (
|
||||
inSpaces = iota // looking for start of a word
|
||||
inWord
|
||||
|
@ -137,7 +138,7 @@ func parseWords(rest string, d *Directive) []string {
|
|||
|
||||
// parse environment like statements. Note that this does *not* handle
|
||||
// variable interpolation, which will be handled in the evaluator.
|
||||
func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
|
||||
func parseNameVal(rest string, key string, d *directives) (*Node, error) {
|
||||
// This is kind of tricky because we need to support the old
|
||||
// variant: KEY name value
|
||||
// as well as the new one: KEY name=value ...
|
||||
|
@ -151,7 +152,7 @@ func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
|
|||
|
||||
// Old format (KEY name value)
|
||||
if !strings.Contains(words[0], "=") {
|
||||
parts := tokenWhitespace.Split(rest, 2)
|
||||
parts := reWhitespace.Split(rest, 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf(key + " must have two arguments")
|
||||
}
|
||||
|
@ -192,12 +193,12 @@ func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
|
|||
return rootNode, prevNode
|
||||
}
|
||||
|
||||
func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseEnv(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
node, err := parseNameVal(rest, "ENV", d)
|
||||
return node, nil, err
|
||||
}
|
||||
|
||||
func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseLabel(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
node, err := parseNameVal(rest, commandLabel, d)
|
||||
return node, nil, err
|
||||
}
|
||||
|
@ -210,7 +211,7 @@ func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
|
|||
// In addition, a keyword definition alone is of the form `keyword` like `name1`
|
||||
// above. And the assignments `name2=` and `name3=""` are equivalent and
|
||||
// assign an empty value to the respective keywords.
|
||||
func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseNameOrNameVal(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
words := parseWords(rest, d)
|
||||
if len(words) == 0 {
|
||||
return nil, nil, nil
|
||||
|
@ -236,7 +237,7 @@ func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, erro
|
|||
|
||||
// parses a whitespace-delimited set of arguments. The result is effectively a
|
||||
// linked list of string arguments.
|
||||
func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseStringsWhitespaceDelimited(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
if rest == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
@ -244,7 +245,7 @@ func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[stri
|
|||
node := &Node{}
|
||||
rootnode := node
|
||||
prevnode := node
|
||||
for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
|
||||
for _, str := range reWhitespace.Split(rest, -1) { // use regexp
|
||||
prevnode = node
|
||||
node.Value = str
|
||||
node.Next = &Node{}
|
||||
|
@ -260,7 +261,7 @@ func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[stri
|
|||
}
|
||||
|
||||
// parseString just wraps the string in quotes and returns a working node.
|
||||
func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseString(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
if rest == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
@ -270,7 +271,7 @@ func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
|
|||
}
|
||||
|
||||
// parseJSON converts JSON arrays to an AST.
|
||||
func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
|
||||
if !strings.HasPrefix(rest, "[") {
|
||||
return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
|
||||
|
@ -303,7 +304,7 @@ func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
|
|||
// parseMaybeJSON determines if the argument appears to be a JSON array. If
|
||||
// so, passes to parseJSON; if not, quotes the result and returns a single
|
||||
// node.
|
||||
func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
if rest == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
@ -325,7 +326,7 @@ func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
|
|||
// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
|
||||
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
|
||||
// delimited string.
|
||||
func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
node, attrs, err := parseJSON(rest, d)
|
||||
|
||||
if err == nil {
|
||||
|
@ -339,7 +340,7 @@ func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, er
|
|||
}
|
||||
|
||||
// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
|
||||
func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
|
||||
func parseHealthConfig(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||
// Find end of first argument
|
||||
var sep int
|
||||
for ; sep < len(rest); sep++ {
|
||||
|
|
108
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
generated
vendored
|
@ -38,6 +38,11 @@ type Node struct {
|
|||
EndLine int // the line in the original dockerfile where the node ends
|
||||
}
|
||||
|
||||
// Location return the location of node in source code
|
||||
func (node *Node) Location() []Range {
|
||||
return toRanges(node.StartLine, node.EndLine)
|
||||
}
|
||||
|
||||
// Dump dumps the AST defined by `node` as a list of sexps.
|
||||
// Returns a string suitable for printing.
|
||||
func (node *Node) Dump() string {
|
||||
|
@ -79,28 +84,33 @@ func (node *Node) AddChild(child *Node, startLine, endLine int) {
|
|||
}
|
||||
|
||||
var (
|
||||
dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
|
||||
tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
|
||||
tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
|
||||
tokenComment = regexp.MustCompile(`^#.*$`)
|
||||
dispatch map[string]func(string, *directives) (*Node, map[string]bool, error)
|
||||
reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
|
||||
reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
|
||||
reComment = regexp.MustCompile(`^#.*$`)
|
||||
)
|
||||
|
||||
// DefaultEscapeToken is the default escape token
|
||||
const DefaultEscapeToken = '\\'
|
||||
|
||||
// Directive is the structure used during a build run to hold the state of
|
||||
var validDirectives = map[string]struct{}{
|
||||
"escape": {},
|
||||
"syntax": {},
|
||||
}
|
||||
|
||||
// directive is the structure used during a build run to hold the state of
|
||||
// parsing directives.
|
||||
type Directive struct {
|
||||
escapeToken rune // Current escape token
|
||||
lineContinuationRegex *regexp.Regexp // Current line continuation regex
|
||||
processingComplete bool // Whether we are done looking for directives
|
||||
escapeSeen bool // Whether the escape directive has been seen
|
||||
type directives struct {
|
||||
escapeToken rune // Current escape token
|
||||
lineContinuationRegex *regexp.Regexp // Current line continuation regex
|
||||
done bool // Whether we are done looking for directives
|
||||
seen map[string]struct{} // Whether the escape directive has been seen
|
||||
}
|
||||
|
||||
// setEscapeToken sets the default token for escaping characters in a Dockerfile.
|
||||
func (d *Directive) setEscapeToken(s string) error {
|
||||
func (d *directives) setEscapeToken(s string) error {
|
||||
if s != "`" && s != "\\" {
|
||||
return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
|
||||
return errors.Errorf("invalid escape token '%s' does not match ` or \\", s)
|
||||
}
|
||||
d.escapeToken = rune(s[0])
|
||||
d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
|
||||
|
@ -110,33 +120,43 @@ func (d *Directive) setEscapeToken(s string) error {
|
|||
// possibleParserDirective looks for parser directives, eg '# escapeToken=<char>'.
|
||||
// Parser directives must precede any builder instruction or other comments,
|
||||
// and cannot be repeated.
|
||||
func (d *Directive) possibleParserDirective(line string) error {
|
||||
if d.processingComplete {
|
||||
func (d *directives) possibleParserDirective(line string) error {
|
||||
if d.done {
|
||||
return nil
|
||||
}
|
||||
|
||||
tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
|
||||
if len(tecMatch) != 0 {
|
||||
for i, n := range tokenEscapeCommand.SubexpNames() {
|
||||
if n == "escapechar" {
|
||||
if d.escapeSeen {
|
||||
return errors.New("only one escape parser directive can be used")
|
||||
}
|
||||
d.escapeSeen = true
|
||||
return d.setEscapeToken(tecMatch[i])
|
||||
}
|
||||
}
|
||||
match := reDirectives.FindStringSubmatch(line)
|
||||
if len(match) == 0 {
|
||||
d.done = true
|
||||
return nil
|
||||
}
|
||||
|
||||
k := strings.ToLower(match[1])
|
||||
_, ok := validDirectives[k]
|
||||
if !ok {
|
||||
d.done = true
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := d.seen[k]; ok {
|
||||
return errors.Errorf("only one %s parser directive can be used", k)
|
||||
}
|
||||
d.seen[k] = struct{}{}
|
||||
|
||||
if k == "escape" {
|
||||
return d.setEscapeToken(match[2])
|
||||
}
|
||||
|
||||
d.processingComplete = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewDefaultDirective returns a new Directive with the default escapeToken token
|
||||
func NewDefaultDirective() *Directive {
|
||||
directive := Directive{}
|
||||
directive.setEscapeToken(string(DefaultEscapeToken))
|
||||
return &directive
|
||||
// newDefaultDirectives returns a new directives structure with the default escapeToken token
|
||||
func newDefaultDirectives() *directives {
|
||||
d := &directives{
|
||||
seen: map[string]struct{}{},
|
||||
}
|
||||
d.setEscapeToken(string(DefaultEscapeToken))
|
||||
return d
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -146,7 +166,7 @@ func init() {
|
|||
// reformulating the arguments according to the rules in the parser
|
||||
// functions. Errors are propagated up by Parse() and the resulting AST can
|
||||
// be incorporated directly into the existing AST as a next.
|
||||
dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
|
||||
dispatch = map[string]func(string, *directives) (*Node, map[string]bool, error){
|
||||
command.Add: parseMaybeJSONToList,
|
||||
command.Arg: parseNameOrNameVal,
|
||||
command.Cmd: parseMaybeJSON,
|
||||
|
@ -171,7 +191,7 @@ func init() {
|
|||
// newNodeFromLine splits the line into parts, and dispatches to a function
|
||||
// based on the command and command arguments. A Node is created from the
|
||||
// result of the dispatch.
|
||||
func newNodeFromLine(line string, directive *Directive) (*Node, error) {
|
||||
func newNodeFromLine(line string, d *directives) (*Node, error) {
|
||||
cmd, flags, args, err := splitCommand(line)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -182,7 +202,7 @@ func newNodeFromLine(line string, directive *Directive) (*Node, error) {
|
|||
if fn == nil {
|
||||
fn = parseIgnore
|
||||
}
|
||||
next, attrs, err := fn(args, directive)
|
||||
next, attrs, err := fn(args, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -214,7 +234,7 @@ func (r *Result) PrintWarnings(out io.Writer) {
|
|||
// Parse reads lines from a Reader, parses the lines into an AST and returns
|
||||
// the AST and escape token
|
||||
func Parse(rwc io.Reader) (*Result, error) {
|
||||
d := NewDefaultDirective()
|
||||
d := newDefaultDirectives()
|
||||
currentLine := 0
|
||||
root := &Node{StartLine: -1}
|
||||
scanner := bufio.NewScanner(rwc)
|
||||
|
@ -229,7 +249,7 @@ func Parse(rwc io.Reader) (*Result, error) {
|
|||
}
|
||||
bytesRead, err = processLine(d, bytesRead, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, withLocation(err, currentLine, 0)
|
||||
}
|
||||
currentLine++
|
||||
|
||||
|
@ -243,7 +263,7 @@ func Parse(rwc io.Reader) (*Result, error) {
|
|||
for !isEndOfLine && scanner.Scan() {
|
||||
bytesRead, err := processLine(d, scanner.Bytes(), false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, withLocation(err, currentLine, 0)
|
||||
}
|
||||
currentLine++
|
||||
|
||||
|
@ -267,7 +287,7 @@ func Parse(rwc io.Reader) (*Result, error) {
|
|||
|
||||
child, err := newNodeFromLine(line, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, withLocation(err, startLine, currentLine)
|
||||
}
|
||||
root.AddChild(child, startLine, currentLine)
|
||||
}
|
||||
|
@ -277,18 +297,18 @@ func Parse(rwc io.Reader) (*Result, error) {
|
|||
}
|
||||
|
||||
if root.StartLine < 0 {
|
||||
return nil, errors.New("file with no instructions.")
|
||||
return nil, withLocation(errors.New("file with no instructions"), currentLine, 0)
|
||||
}
|
||||
|
||||
return &Result{
|
||||
AST: root,
|
||||
Warnings: warnings,
|
||||
EscapeToken: d.escapeToken,
|
||||
}, handleScannerError(scanner.Err())
|
||||
}, withLocation(handleScannerError(scanner.Err()), currentLine, 0)
|
||||
}
|
||||
|
||||
func trimComments(src []byte) []byte {
|
||||
return tokenComment.ReplaceAll(src, []byte{})
|
||||
return reComment.ReplaceAll(src, []byte{})
|
||||
}
|
||||
|
||||
func trimWhitespace(src []byte) []byte {
|
||||
|
@ -296,7 +316,7 @@ func trimWhitespace(src []byte) []byte {
|
|||
}
|
||||
|
||||
func isComment(line []byte) bool {
|
||||
return tokenComment.Match(trimWhitespace(line))
|
||||
return reComment.Match(trimWhitespace(line))
|
||||
}
|
||||
|
||||
func isEmptyContinuationLine(line []byte) bool {
|
||||
|
@ -305,7 +325,7 @@ func isEmptyContinuationLine(line []byte) bool {
|
|||
|
||||
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
|
||||
|
||||
func trimContinuationCharacter(line string, d *Directive) (string, bool) {
|
||||
func trimContinuationCharacter(line string, d *directives) (string, bool) {
|
||||
if d.lineContinuationRegex.MatchString(line) {
|
||||
line = d.lineContinuationRegex.ReplaceAllString(line, "")
|
||||
return line, false
|
||||
|
@ -315,7 +335,7 @@ func trimContinuationCharacter(line string, d *Directive) (string, bool) {
|
|||
|
||||
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
|
||||
// to preserve whitespace on continuation lines. Why is that done?
|
||||
func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
|
||||
func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, error) {
|
||||
if stripLeftWhitespace {
|
||||
token = trimWhitespace(token)
|
||||
}

2 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go generated vendored
@@ -12,7 +12,7 @@ func splitCommand(line string) (string, []string, string, error) {
    var flags []string

    // Make sure we get the same results irrespective of leading/trailing spaces
    cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
    cmdline := reWhitespace.Split(strings.TrimSpace(line), 2)
    cmd := strings.ToLower(cmdline[0])

    if len(cmdline) == 2 {

25 vendor/github.com/moby/buildkit/frontend/gateway/gateway.go generated vendored
@ -13,6 +13,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
gogotypes "github.com/gogo/protobuf/types"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
apitypes "github.com/moby/buildkit/api/types"
|
||||
"github.com/moby/buildkit/cache"
|
||||
cacheutil "github.com/moby/buildkit/cache/util"
|
||||
|
@ -27,6 +29,7 @@ import (
|
|||
"github.com/moby/buildkit/solver"
|
||||
opspb "github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/apicaps"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/buildkit/worker"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@ -218,7 +221,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
|
|||
err = llbBridge.Exec(ctx, meta, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
|
||||
|
||||
if err != nil {
|
||||
if errors.Cause(err) == context.Canceled && lbf.isErrServerClosed {
|
||||
if errors.Is(err, context.Canceled) && lbf.isErrServerClosed {
|
||||
err = errors.Errorf("frontend grpc server closed unexpectedly")
|
||||
}
|
||||
// An existing error (set via Return rpc) takes
|
||||
|
@ -309,7 +312,7 @@ func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridg
|
|||
func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) (*llbBridgeForwarder, context.Context, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
lbf := NewBridgeForwarder(ctx, llbBridge, workers, inputs)
|
||||
server := grpc.NewServer()
|
||||
server := grpc.NewServer(grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor))
|
||||
grpc_health_v1.RegisterHealthServer(server, health.NewServer())
|
||||
pb.RegisterLLBBridgeServer(server, lbf)
|
||||
|
||||
|
@ -472,7 +475,9 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest)
|
|||
return nil, errors.Errorf("solve did not return default result")
|
||||
}
|
||||
|
||||
pbRes := &pb.Result{}
|
||||
pbRes := &pb.Result{
|
||||
Metadata: res.Metadata,
|
||||
}
|
||||
var defaultID string
|
||||
|
||||
lbf.mu.Lock()
|
||||
|
@ -668,11 +673,11 @@ func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongR
|
|||
|
||||
func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) {
|
||||
if in.Error != nil {
|
||||
return lbf.setResult(nil, status.ErrorProto(&spb.Status{
|
||||
return lbf.setResult(nil, grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{
|
||||
Code: in.Error.Code,
|
||||
Message: in.Error.Message,
|
||||
// Details: in.Error.Details,
|
||||
}))
|
||||
Details: convertGogoAny(in.Error.Details),
|
||||
})))
|
||||
} else {
|
||||
r := &frontend.Result{
|
||||
Metadata: in.Result.Metadata,
|
||||
|
@ -752,3 +757,11 @@ type markTypeFrontend struct{}
|
|||
func (*markTypeFrontend) SetImageOption(ii *llb.ImageInfo) {
|
||||
ii.RecordType = string(client.UsageRecordTypeFrontend)
|
||||
}
|
||||
|
||||
func convertGogoAny(in []*gogotypes.Any) []*any.Any {
|
||||
out := make([]*any.Any, len(in))
|
||||
for i := range in {
|
||||
out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
|
||||
}
|
||||
return out
|
||||
}

19 vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go generated vendored
@ -10,11 +10,14 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/gogo/googleapis/google/rpc"
|
||||
gogotypes "github.com/gogo/protobuf/types"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/frontend/gateway/client"
|
||||
pb "github.com/moby/buildkit/frontend/gateway/pb"
|
||||
opspb "github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/apicaps"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
fstypes "github.com/tonistiigi/fsutil/types"
|
||||
|
@ -29,7 +32,7 @@ type GrpcClient interface {
|
|||
}
|
||||
|
||||
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
resp, err := c.Ping(ctx, &pb.PingRequest{})
|
||||
if err != nil {
|
||||
|
@ -150,12 +153,12 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
|
|||
}
|
||||
}
|
||||
if retError != nil {
|
||||
st, _ := status.FromError(errors.Cause(retError))
|
||||
st, _ := status.FromError(grpcerrors.ToGRPC(retError))
|
||||
stp := st.Proto()
|
||||
req.Error = &rpc.Status{
|
||||
Code: stp.Code,
|
||||
Message: stp.Message,
|
||||
// Details: stp.Details,
|
||||
Details: convertToGogoAny(stp.Details),
|
||||
}
|
||||
}
|
||||
if _, err := c.client.Return(ctx, req); err != nil && retError == nil {
|
||||
|
@ -503,7 +506,7 @@ func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, err
|
|||
return stdioConn(), nil
|
||||
})
|
||||
|
||||
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
|
||||
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create grpc client")
|
||||
}
|
||||
|
@ -589,3 +592,11 @@ func workers() []client.WorkerInfo {
|
|||
func product() string {
|
||||
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
|
||||
}
|
||||
|
||||
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
|
||||
out := make([]*gogotypes.Any, len(in))
|
||||
for i := range in {
|
||||
out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
|
||||
}
|
||||
return out
|
||||
}

10 vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go generated vendored
@@ -32,6 +32,9 @@ const (
    // CapFrontendInputs is a capability to request frontend inputs from the
    // LLBBridge GRPC server.
    CapFrontendInputs apicaps.CapID = "frontend.inputs"

    // CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata
    CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata"
)

func init() {

@@ -126,4 +129,11 @@ func init() {
        Enabled: true,
        Status:  apicaps.CapStatusExperimental,
    })

    Caps.Init(apicaps.Cap{
        ID:      CapGatewaySolveMetadata,
        Name:    "gateway metadata",
        Enabled: true,
        Status:  apicaps.CapStatusExperimental,
    })
}

10 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto generated vendored
@@ -32,7 +32,7 @@ service LLBBridge {

message Result {
    oneof result {
        // Deprecated non-array refs.
        // Deprecated non-array refs.
        string refDeprecated = 1;
        RefMapDeprecated refsDeprecated = 2;

@@ -67,7 +67,7 @@ message InputsRequest {
}

message InputsResponse {
    map<string, pb.Definition> Definitions = 1;
    map<string, pb.Definition> Definitions = 1;
}

message ResolveImageConfigRequest {

@@ -87,9 +87,9 @@ message SolveRequest {
    string Frontend = 2;
    map<string, string> FrontendOpt = 3;
    // ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0.
    // When ImportCacheRefsDeprecated is set, the solver appends
    // {.Type = "registry", .Attrs = {"ref": importCacheRef}}
    // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
    // When ImportCacheRefsDeprecated is set, the solver appends
    // {.Type = "registry", .Attrs = {"ref": importCacheRef}}
    // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
    repeated string ImportCacheRefsDeprecated = 4;
    bool allowResultReturn = 5;
    bool allowResultArrayRef = 6;

61 vendor/github.com/moby/buildkit/go.mod generated vendored
@ -3,24 +3,22 @@ module github.com/moby/buildkit
|
|||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200220082720-bb896865146c
|
||||
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200512015515-32086ef23a5a
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
|
||||
github.com/Microsoft/hcsshim v0.8.7 // indirect
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
|
||||
github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d // indirect
|
||||
github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d
|
||||
github.com/containerd/cgroups v0.0.0-20200327175542-b44481373989 // indirect
|
||||
github.com/containerd/console v1.0.0
|
||||
github.com/containerd/containerd v1.4.0-0
|
||||
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41
|
||||
github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
|
||||
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328
|
||||
github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 // indirect
|
||||
github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.0.0
|
||||
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
|
||||
github.com/docker/distribution v0.0.0-20200223014041-6b972e50feee
|
||||
github.com/docker/distribution v2.7.1+incompatible
|
||||
github.com/docker/docker v0.0.0
|
||||
github.com/docker/docker-credential-helpers v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.3.0
|
||||
|
@ -29,55 +27,52 @@ require (
|
|||
github.com/gogo/googleapis v1.3.2
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/protobuf v1.3.3
|
||||
github.com/google/go-cmp v0.3.1
|
||||
github.com/google/go-cmp v0.4.0
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
|
||||
github.com/google/uuid v1.1.1 // indirect
|
||||
github.com/gorilla/mux v1.7.4 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.1
|
||||
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
|
||||
github.com/imdario/mergo v0.3.7 // indirect
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
|
||||
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452
|
||||
github.com/mitchellh/hashstructure v1.0.0
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4
|
||||
github.com/opencontainers/runtime-spec v1.0.1
|
||||
github.com/opencontainers/selinux v1.3.2 // indirect
|
||||
github.com/opencontainers/runc v1.0.0-rc10
|
||||
github.com/opencontainers/runtime-spec v1.0.2
|
||||
github.com/opencontainers/selinux v1.5.1 // indirect
|
||||
github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75
|
||||
github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be
|
||||
github.com/opentracing/opentracing-go v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/profile v1.2.1
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/stretchr/testify v1.5.1
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20200326231323-c2c7d7b0e144
|
||||
github.com/tonistiigi/fsutil v0.0.0-20200512175118-ae3a8d753069
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||
github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
|
||||
github.com/uber/jaeger-client-go v2.11.2+incompatible
|
||||
github.com/uber/jaeger-lib v1.2.1 // indirect
|
||||
github.com/urfave/cli v1.22.2
|
||||
github.com/vishvananda/netlink v1.0.0 // indirect
|
||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
|
||||
github.com/vishvananda/netlink v1.1.0 // indirect
|
||||
go.etcd.io/bbolt v1.3.3
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
|
||||
google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
|
||||
google.golang.org/grpc v1.27.1
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gotest.tools v2.2.0+incompatible
|
||||
gotest.tools/v3 v3.0.2 // indirect
|
||||
)
|
||||
|
||||
replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
|
||||
|
||||
replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
|
||||
|
||||
replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
|
||||
|
||||
replace github.com/docker/docker => github.com/docker/docker v1.4.2-0.20200227233006-38f52c9fec82
|
||||
replace (
|
||||
github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200512144102-f13ba8f2f2fd
|
||||
github.com/docker/docker => github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible
|
||||
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
|
||||
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
|
||||
)

7 vendor/github.com/moby/buildkit/session/auth/auth.go generated vendored
@@ -4,9 +4,8 @@ import (
    "context"

    "github.com/moby/buildkit/session"
    "github.com/pkg/errors"
    "github.com/moby/buildkit/util/grpcerrors"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) {

@@ -17,10 +16,10 @@ func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string
            Host: host,
        })
        if err != nil {
            if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
            if grpcerrors.Code(err) == codes.Unimplemented {
                return "", "", nil
            }
            return "", "", errors.WithStack(err)
            return "", "", err
        }
        return resp.Username, resp.Secret, nil
    }

4 vendor/github.com/moby/buildkit/session/filesync/diffcopy.go generated vendored
@@ -41,7 +41,7 @@ type streamWriterCloser struct {
func (wc *streamWriterCloser) Write(dt []byte) (int, error) {
    if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil {
        // SendMsg return EOF on remote errors
        if errors.Cause(err) == io.EOF {
        if errors.Is(err, io.EOF) {
            if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil {
                return 0, err
            }

@@ -105,7 +105,7 @@ func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
    for {
        bm := BytesMessage{}
        if err := ds.RecvMsg(&bm); err != nil {
            if errors.Cause(err) == io.EOF {
            if errors.Is(err, io.EOF) {
                return nil
            }
            return errors.WithStack(err)

9 vendor/github.com/moby/buildkit/session/filesync/filesync.go generated vendored
@@ -255,7 +255,7 @@ func (sp *fsSyncTarget) Register(server *grpc.Server) {
    RegisterFileSendServer(server, sp)
}

func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) {
    if sp.outdir != "" {
        return syncTargetDiffCopy(stream, sp.outdir)
    }

@@ -277,7 +277,12 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
    if wc == nil {
        return status.Errorf(codes.AlreadyExists, "target already exists")
    }
    defer wc.Close()
    defer func() {
        err1 := wc.Close()
        if err != nil {
            err = err1
        }
    }()
    return writeTargetFile(stream, wc)
}


26 vendor/github.com/moby/buildkit/session/grpc.go generated vendored
@@ -6,7 +6,9 @@ import (
    "sync/atomic"
    "time"

    grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
    "github.com/moby/buildkit/util/grpcerrors"
    opentracing "github.com/opentracing/opentracing-go"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

@@ -25,6 +27,9 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
}

func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) {
    var unary []grpc.UnaryClientInterceptor
    var stream []grpc.StreamClientInterceptor

    var dialCount int64
    dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
        if c := atomic.AddInt64(&dialCount, 1); c > 1 {

@@ -40,10 +45,23 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.

    if span := opentracing.SpanFromContext(ctx); span != nil {
        tracer := span.Tracer()
        dialOpts = append(dialOpts,
            grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())),
            grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())),
        )
        unary = append(unary, otgrpc.OpenTracingClientInterceptor(tracer, traceFilter()))
        stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter()))
    }

    unary = append(unary, grpcerrors.UnaryClientInterceptor)
    stream = append(stream, grpcerrors.StreamClientInterceptor)

    if len(unary) == 1 {
        dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(unary[0]))
    } else if len(unary) > 1 {
        dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
    }

    if len(stream) == 1 {
        dialOpts = append(dialOpts, grpc.WithStreamInterceptor(stream[0]))
    } else if len(stream) > 1 {
        dialOpts = append(dialOpts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
    }

    cc, err := grpc.DialContext(ctx, "", dialOpts...)

6 vendor/github.com/moby/buildkit/session/secrets/secrets.go generated vendored
@@ -4,9 +4,9 @@ import (
    "context"

    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/util/grpcerrors"
    "github.com/pkg/errors"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

type SecretStore interface {

@@ -21,10 +21,10 @@ func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error)
        ID: id,
    })
    if err != nil {
        if st, ok := status.FromError(errors.Cause(err)); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) {
        if code := grpcerrors.Code(err); code == codes.Unimplemented || code == codes.NotFound {
            return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id)
        }
        return nil, errors.WithStack(err)
        return nil, err
    }
    return resp.Data, nil
}

26 vendor/github.com/moby/buildkit/session/session.go generated vendored
@@ -5,8 +5,10 @@ import (
    "net"
    "strings"

    grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
    "github.com/moby/buildkit/identity"
    "github.com/moby/buildkit/util/grpcerrors"
    opentracing "github.com/opentracing/opentracing-go"
    "github.com/pkg/errors"
    "google.golang.org/grpc"

@@ -45,13 +47,29 @@ type Session struct {
func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) {
    id := identity.NewID()

    var unary []grpc.UnaryServerInterceptor
    var stream []grpc.StreamServerInterceptor

    serverOpts := []grpc.ServerOption{}
    if span := opentracing.SpanFromContext(ctx); span != nil {
        tracer := span.Tracer()
        serverOpts = []grpc.ServerOption{
            grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())),
            grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())),
        }
        unary = append(unary, otgrpc.OpenTracingServerInterceptor(tracer, traceFilter()))
        stream = append(stream, otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter()))
    }

    unary = append(unary, grpcerrors.UnaryServerInterceptor)
    stream = append(stream, grpcerrors.StreamServerInterceptor)

    if len(unary) == 1 {
        serverOpts = append(serverOpts, grpc.UnaryInterceptor(unary[0]))
    } else if len(unary) > 1 {
        serverOpts = append(serverOpts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary...)))
    }

    if len(stream) == 1 {
        serverOpts = append(serverOpts, grpc.StreamInterceptor(stream[0]))
    } else if len(stream) > 1 {
        serverOpts = append(serverOpts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(stream...)))
    }

    s := &Session{
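
For reference, a minimal standalone sketch (not from the vendored sources) of the interceptor-chaining pattern used in NewSession and grpcClientConn above. The loggingUnary interceptor is an illustrative stand-in for interceptors such as grpcerrors.UnaryServerInterceptor; only the chaining logic mirrors the diff.

package main

import (
    "context"
    "log"

    grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    "google.golang.org/grpc"
)

// loggingUnary is a hypothetical interceptor used only for this sketch.
func loggingUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    log.Printf("call: %s", info.FullMethod)
    return handler(ctx, req)
}

func main() {
    unary := []grpc.UnaryServerInterceptor{loggingUnary, loggingUnary}

    var opts []grpc.ServerOption
    // grpc.UnaryInterceptor accepts a single interceptor, so several of them
    // are combined with grpc_middleware.ChainUnaryServer, as in NewSession above.
    if len(unary) == 1 {
        opts = append(opts, grpc.UnaryInterceptor(unary[0]))
    } else if len(unary) > 1 {
        opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary...)))
    }

    _ = grpc.NewServer(opts...)
}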

2 vendor/github.com/moby/buildkit/snapshot/containerd/content.go generated vendored
@@ -5,7 +5,7 @@ import (

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/namespaces"
    "github.com/opencontainers/go-digest"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

44 vendor/github.com/moby/buildkit/solver/edge.go generated vendored
@ -26,12 +26,13 @@ func (t edgeStatusType) String() string {
|
|||
|
||||
func newEdge(ed Edge, op activeOp, index *edgeIndex) *edge {
|
||||
e := &edge{
|
||||
edge: ed,
|
||||
op: op,
|
||||
depRequests: map[pipe.Receiver]*dep{},
|
||||
keyMap: map[string]struct{}{},
|
||||
cacheRecords: map[string]*CacheRecord{},
|
||||
index: index,
|
||||
edge: ed,
|
||||
op: op,
|
||||
depRequests: map[pipe.Receiver]*dep{},
|
||||
keyMap: map[string]struct{}{},
|
||||
cacheRecords: map[string]*CacheRecord{},
|
||||
cacheRecordsLoaded: map[string]struct{}{},
|
||||
index: index,
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
@ -44,14 +45,16 @@ type edge struct {
|
|||
depRequests map[pipe.Receiver]*dep
|
||||
deps []*dep
|
||||
|
||||
cacheMapReq pipe.Receiver
|
||||
cacheMapDone bool
|
||||
cacheMapIndex int
|
||||
cacheMapDigests []digest.Digest
|
||||
execReq pipe.Receiver
|
||||
err error
|
||||
cacheRecords map[string]*CacheRecord
|
||||
keyMap map[string]struct{}
|
||||
cacheMapReq pipe.Receiver
|
||||
cacheMapDone bool
|
||||
cacheMapIndex int
|
||||
cacheMapDigests []digest.Digest
|
||||
execReq pipe.Receiver
|
||||
execCacheLoad bool
|
||||
err error
|
||||
cacheRecords map[string]*CacheRecord
|
||||
cacheRecordsLoaded map[string]struct{}
|
||||
keyMap map[string]struct{}
|
||||
|
||||
noCacheMatchPossible bool
|
||||
allDepsCompletedCacheFast bool
|
||||
|
@ -425,7 +428,11 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
|
|||
if upt == e.execReq && upt.Status().Completed {
|
||||
if err := upt.Status().Err; err != nil {
|
||||
e.execReq = nil
|
||||
if !upt.Status().Canceled && e.err == nil {
|
||||
if e.execCacheLoad {
|
||||
for k := range e.cacheRecordsLoaded {
|
||||
delete(e.cacheRecords, k)
|
||||
}
|
||||
} else if !upt.Status().Canceled && e.err == nil {
|
||||
e.err = err
|
||||
}
|
||||
} else {
|
||||
|
@ -561,7 +568,9 @@ func (e *edge) recalcCurrentState() {
|
|||
}
|
||||
|
||||
for _, r := range records {
|
||||
e.cacheRecords[r.ID] = r
|
||||
if _, ok := e.cacheRecordsLoaded[r.ID]; !ok {
|
||||
e.cacheRecords[r.ID] = r
|
||||
}
|
||||
}
|
||||
|
||||
e.keys = append(e.keys, e.makeExportable(mergedKey, records))
|
||||
|
@ -821,6 +830,7 @@ func (e *edge) execIfPossible(f *pipeFactory) bool {
|
|||
return true
|
||||
}
|
||||
e.execReq = f.NewFuncRequest(e.loadCache)
|
||||
e.execCacheLoad = true
|
||||
for req := range e.depRequests {
|
||||
req.Cancel()
|
||||
}
|
||||
|
@ -831,6 +841,7 @@ func (e *edge) execIfPossible(f *pipeFactory) bool {
|
|||
return true
|
||||
}
|
||||
e.execReq = f.NewFuncRequest(e.execOp)
|
||||
e.execCacheLoad = false
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
@ -851,6 +862,7 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
|
|||
}
|
||||
|
||||
rec := getBestResult(recs)
|
||||
e.cacheRecordsLoaded[rec.ID] = struct{}{}
|
||||
|
||||
logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID)
|
||||
res, err := e.op.LoadCache(ctx, rec)

129 vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go generated vendored Normal file
@ -0,0 +1,129 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: errdefs.proto
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
pb "github.com/moby/buildkit/solver/pb"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type Vertex struct {
|
||||
Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Vertex) Reset() { *m = Vertex{} }
|
||||
func (m *Vertex) String() string { return proto.CompactTextString(m) }
|
||||
func (*Vertex) ProtoMessage() {}
|
||||
func (*Vertex) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_689dc58a5060aff5, []int{0}
|
||||
}
|
||||
func (m *Vertex) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Vertex.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Vertex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Vertex.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Vertex) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Vertex.Merge(m, src)
|
||||
}
|
||||
func (m *Vertex) XXX_Size() int {
|
||||
return xxx_messageInfo_Vertex.Size(m)
|
||||
}
|
||||
func (m *Vertex) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Vertex.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Vertex proto.InternalMessageInfo
|
||||
|
||||
func (m *Vertex) GetDigest() string {
|
||||
if m != nil {
|
||||
return m.Digest
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Source struct {
|
||||
Info *pb.SourceInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
|
||||
Ranges []*pb.Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Source) Reset() { *m = Source{} }
|
||||
func (m *Source) String() string { return proto.CompactTextString(m) }
|
||||
func (*Source) ProtoMessage() {}
|
||||
func (*Source) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_689dc58a5060aff5, []int{1}
|
||||
}
|
||||
func (m *Source) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Source.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Source) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Source.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Source) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Source.Merge(m, src)
|
||||
}
|
||||
func (m *Source) XXX_Size() int {
|
||||
return xxx_messageInfo_Source.Size(m)
|
||||
}
|
||||
func (m *Source) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Source.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Source proto.InternalMessageInfo
|
||||
|
||||
func (m *Source) GetInfo() *pb.SourceInfo {
|
||||
if m != nil {
|
||||
return m.Info
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Source) GetRanges() []*pb.Range {
|
||||
if m != nil {
|
||||
return m.Ranges
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Vertex)(nil), "errdefs.Vertex")
|
||||
proto.RegisterType((*Source)(nil), "errdefs.Source")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("errdefs.proto", fileDescriptor_689dc58a5060aff5) }
|
||||
|
||||
var fileDescriptor_689dc58a5060aff5 = []byte{
|
||||
// 177 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x2c, 0xcd, 0xc1, 0x8a, 0x83, 0x30,
|
||||
0x10, 0x80, 0x61, 0xdc, 0x5d, 0xb2, 0x18, 0xd9, 0x3d, 0xe4, 0x50, 0xa4, 0x27, 0xeb, 0xc9, 0x43,
|
||||
0x49, 0xc0, 0x3e, 0x45, 0x4f, 0x85, 0x14, 0x7a, 0x6f, 0x74, 0xb4, 0xa1, 0xea, 0x84, 0x49, 0x2c,
|
||||
0xed, 0xdb, 0x17, 0x6d, 0x8e, 0xff, 0x7c, 0x33, 0x0c, 0xff, 0x03, 0xa2, 0x16, 0x3a, 0x2f, 0x1d,
|
||||
0x61, 0x40, 0xf1, 0x1b, 0x73, 0xbb, 0xef, 0x6d, 0xb8, 0xcd, 0x46, 0x36, 0x38, 0xaa, 0x11, 0xcd,
|
||||
0x4b, 0x99, 0xd9, 0x0e, 0xed, 0xdd, 0x06, 0xe5, 0x71, 0x78, 0x00, 0x29, 0x67, 0x14, 0xba, 0x78,
|
||||
0x56, 0x16, 0x9c, 0x5d, 0x80, 0x02, 0x3c, 0xc5, 0x86, 0xb3, 0xd6, 0xf6, 0xe0, 0x43, 0x9e, 0x14,
|
||||
0x49, 0x95, 0xea, 0x58, 0xe5, 0x89, 0xb3, 0x33, 0xce, 0xd4, 0x80, 0x28, 0xf9, 0x8f, 0x9d, 0x3a,
|
||||
0x5c, 0x3d, 0xab, 0xff, 0xa5, 0x33, 0xf2, 0x23, 0xc7, 0xa9, 0x43, 0xbd, 0x9a, 0xd8, 0x71, 0x46,
|
||||
0xd7, 0xa9, 0x07, 0x9f, 0x7f, 0x15, 0xdf, 0x55, 0x56, 0xa7, 0xcb, 0x96, 0x5e, 0x26, 0x3a, 0x82,
|
||||
0x61, 0xeb, 0xe7, 0xc3, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x93, 0xb5, 0x8b, 0x2a, 0xc1, 0x00, 0x00,
|
||||
0x00,
|
||||
}

14 vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto generated vendored Normal file
@@ -0,0 +1,14 @@
syntax = "proto3";

package errdefs;

import "github.com/moby/buildkit/solver/pb/ops.proto";

message Vertex {
    string digest = 1;
}

message Source {
    pb.SourceInfo info = 1;
    repeated pb.Range ranges = 2;
}

3 vendor/github.com/moby/buildkit/solver/errdefs/generate.go generated vendored Normal file
@@ -0,0 +1,3 @@
package errdefs

//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=. errdefs.proto

128 vendor/github.com/moby/buildkit/solver/errdefs/source.go generated vendored Normal file
@ -0,0 +1,128 @@
|
|||
package errdefs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
pb "github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func WithSource(err error, src Source) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &ErrorSource{Source: src, error: err}
|
||||
}
|
||||
|
||||
type ErrorSource struct {
|
||||
Source
|
||||
error
|
||||
}
|
||||
|
||||
func (e *ErrorSource) Unwrap() error {
|
||||
return e.error
|
||||
}
|
||||
|
||||
func (e *ErrorSource) ToProto() grpcerrors.TypedErrorProto {
|
||||
return &e.Source
|
||||
}
|
||||
|
||||
func Sources(err error) []*Source {
|
||||
var out []*Source
|
||||
var es *ErrorSource
|
||||
if errors.As(err, &es) {
|
||||
out = Sources(es.Unwrap())
|
||||
out = append(out, &es.Source)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Source) WrapError(err error) error {
|
||||
return &ErrorSource{error: err, Source: *s}
|
||||
}
|
||||
|
||||
func (s *Source) Print(w io.Writer) error {
|
||||
si := s.Info
|
||||
if si == nil {
|
||||
return nil
|
||||
}
|
||||
lines := strings.Split(string(si.Data), "\n")
|
||||
|
||||
start, end, ok := getStartEndLine(s.Ranges)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if start > len(lines) || start < 1 {
|
||||
return nil
|
||||
}
|
||||
if end > len(lines) {
|
||||
end = len(lines)
|
||||
}
|
||||
|
||||
pad := 2
|
||||
if end == start {
|
||||
pad = 4
|
||||
}
|
||||
var p int
|
||||
|
||||
prepadStart := start
|
||||
for {
|
||||
if p >= pad {
|
||||
break
|
||||
}
|
||||
if start > 1 {
|
||||
start--
|
||||
p++
|
||||
}
|
||||
if end != len(lines) {
|
||||
end++
|
||||
p++
|
||||
}
|
||||
p++
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%s:%d\n--------------------\n", si.Filename, prepadStart)
|
||||
for i := start; i <= end; i++ {
|
||||
pfx := " "
|
||||
if containsLine(s.Ranges, i) {
|
||||
pfx = ">>>"
|
||||
}
|
||||
fmt.Fprintf(w, " %3d | %s %s\n", i, pfx, lines[i-1])
|
||||
}
|
||||
fmt.Fprintf(w, "--------------------\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
func containsLine(rr []*pb.Range, l int) bool {
|
||||
for _, r := range rr {
|
||||
e := r.End.Line
|
||||
if e < r.Start.Line {
|
||||
e = r.Start.Line
|
||||
}
|
||||
if r.Start.Line <= int32(l) && e >= int32(l) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) {
|
||||
first := true
|
||||
for _, r := range rr {
|
||||
e := r.End.Line
|
||||
if e < r.Start.Line {
|
||||
e = r.Start.Line
|
||||
}
|
||||
if first || int(r.Start.Line) < start {
|
||||
start = int(r.Start.Line)
|
||||
}
|
||||
if int(e) > end {
|
||||
end = int(e)
|
||||
}
|
||||
first = false
|
||||
}
|
||||
return start, end, !first
|
||||
}
|

36 vendor/github.com/moby/buildkit/solver/errdefs/vertex.go generated vendored Normal file
@@ -0,0 +1,36 @@
package errdefs

import (
    proto "github.com/golang/protobuf/proto"
    "github.com/moby/buildkit/util/grpcerrors"
    digest "github.com/opencontainers/go-digest"
)

func init() {
    proto.RegisterType((*Vertex)(nil), "errdefs.Vertex")
    proto.RegisterType((*Source)(nil), "errdefs.Source")
}

type VertexError struct {
    Vertex
    error
}

func (e *VertexError) Unwrap() error {
    return e.error
}

func (e *VertexError) ToProto() grpcerrors.TypedErrorProto {
    return &e.Vertex
}

func WrapVertex(err error, dgst digest.Digest) error {
    if err == nil {
        return nil
    }
    return &VertexError{Vertex: Vertex{Digest: dgst.String()}, error: err}
}

func (v *Vertex) WrapError(err error) error {
    return &VertexError{error: err, Vertex: *v}
}
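
For reference, a minimal standalone sketch (not from the vendored sources) of how the errdefs helpers added above could be consumed: unwrap any Source values attached to a build error with Sources and render the referenced lines with Print. The error value and the empty Source are placeholders for whatever a real solve attaches via WithSource.

package main

import (
    "errors"
    "fmt"
    "os"

    "github.com/moby/buildkit/solver/errdefs"
)

func main() {
    // Placeholder: pretend this came back from a solve; bridge.go attaches the
    // source mapping with errdefs.WithSource in the same way.
    err := errdefs.WithSource(errors.New("process did not complete successfully"), errdefs.Source{})

    for _, src := range errdefs.Sources(err) {
        // Print writes the mapped lines with a ">>>" marker, as defined in source.go above.
        if perr := src.Print(os.Stdout); perr != nil {
            fmt.Fprintln(os.Stderr, perr)
        }
    }
    fmt.Fprintln(os.Stderr, err)
}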

2 vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go generated vendored
@@ -159,7 +159,7 @@ func (pw *sender) Finalize(v interface{}, err error) {
    }
    pw.status.Err = err
    pw.status.Completed = true
    if errors.Cause(err) == context.Canceled && pw.req.Canceled {
    if errors.Is(err, context.Canceled) && pw.req.Canceled {
        pw.status.Canceled = true
    }
    pw.sendChannel.Send(pw.status)
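
For reference, a minimal standalone sketch (not from the vendored sources) of why the errors.Cause(err) == x comparisons throughout this diff can become errors.Is(err, x): the github.com/pkg/errors wrappers used here (v0.9.x in go.mod) implement Unwrap, so the standard library walks the whole wrap chain.

package main

import (
    "context"
    "errors"
    "fmt"

    pkgerrors "github.com/pkg/errors"
)

func main() {
    // Wrap a sentinel error the way the vendored code does.
    err := pkgerrors.Wrap(context.Canceled, "request failed")

    fmt.Println(pkgerrors.Cause(err) == context.Canceled) // true: Cause unwraps pkg/errors wrappers only
    fmt.Println(errors.Is(err, context.Canceled))         // true: Is follows any Unwrap chain
}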

16 vendor/github.com/moby/buildkit/solver/jobs.go generated vendored
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/util/progress"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
|
@ -53,6 +54,7 @@ type state struct {
|
|||
|
||||
vtx Vertex
|
||||
clientVertex client.Vertex
|
||||
origDigest digest.Digest // original LLB digest. TODO: probably better to use string ID so this isn't needed
|
||||
|
||||
mu sync.Mutex
|
||||
op *sharedOp
|
||||
|
@ -318,6 +320,7 @@ func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex
|
|||
mainCache: jl.opts.DefaultCache,
|
||||
cache: map[string]CacheManager{},
|
||||
solver: jl,
|
||||
origDigest: origVtx.Digest(),
|
||||
}
|
||||
jl.actives[dgst] = st
|
||||
}
|
||||
|
@ -564,7 +567,10 @@ func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, err
|
|||
return res, err
|
||||
}
|
||||
|
||||
func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (digest.Digest, error) {
|
||||
func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (dgst digest.Digest, err error) {
|
||||
defer func() {
|
||||
err = errdefs.WrapVertex(err, s.st.origDigest)
|
||||
}()
|
||||
key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) {
|
||||
s.slowMu.Lock()
|
||||
// TODO: add helpers for these stored values
|
||||
|
@ -609,7 +615,10 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBased
|
|||
return key.(digest.Digest), nil
|
||||
}
|
||||
|
||||
func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, error) {
|
||||
func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, err error) {
|
||||
defer func() {
|
||||
err = errdefs.WrapVertex(err, s.st.origDigest)
|
||||
}()
|
||||
op, err := s.getOp()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -665,6 +674,9 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, erro
|
|||
}
|
||||
|
||||
func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) {
|
||||
defer func() {
|
||||
err = errdefs.WrapVertex(err, s.st.origDigest)
|
||||
}()
|
||||
op, err := s.getOp()
|
||||
if err != nil {
|
||||
return nil, nil, err

31 vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go generated vendored
@ -18,12 +18,12 @@ import (
|
|||
gw "github.com/moby/buildkit/frontend/gateway/client"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/buildkit/worker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -36,7 +36,6 @@ type llbBridge struct {
|
|||
resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
|
||||
cms map[string]solver.CacheManager
|
||||
cmsMu sync.Mutex
|
||||
platforms []specs.Platform
|
||||
sm *session.Manager
|
||||
}
|
||||
|
||||
|
@ -88,7 +87,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp
|
|||
}
|
||||
dpc := &detectPrunedCacheID{}
|
||||
|
||||
edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
|
||||
edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load LLB")
|
||||
}
|
||||
|
@ -182,7 +181,31 @@ func (rp *resultProxy) Release(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) {
|
||||
func (rp *resultProxy) wrapError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
var ve *errdefs.VertexError
|
||||
if errors.As(err, &ve) {
|
||||
if rp.def.Source != nil {
|
||||
locs, ok := rp.def.Source.Locations[string(ve.Digest)]
|
||||
if ok {
|
||||
for _, loc := range locs.Locations {
|
||||
err = errdefs.WithSource(err, errdefs.Source{
|
||||
Info: rp.def.Source.Infos[loc.SourceIndex],
|
||||
Ranges: loc.Ranges,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err error) {
|
||||
defer func() {
|
||||
err = rp.wrapError(err)
|
||||
}()
|
||||
r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) {
|
||||
rp.mu.Lock()
|
||||
if rp.released {
|

25 vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go generated vendored
@ -34,16 +34,17 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho
|
|||
return nil, nil
|
||||
}
|
||||
old = ©.User{} // root
|
||||
}
|
||||
if idmap != nil {
|
||||
identity, err := idmap.ToHost(idtools.Identity{
|
||||
UID: old.Uid,
|
||||
GID: old.Gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// non-nil old is already mapped
|
||||
if idmap != nil {
|
||||
identity, err := idmap.ToHost(idtools.Identity{
|
||||
UID: old.Uid,
|
||||
GID: old.Gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ©.User{Uid: identity.UID, Gid: identity.GID}, nil
|
||||
}
|
||||
return ©.User{Uid: identity.UID, Gid: identity.GID}, nil
|
||||
}
|
||||
return old, nil
|
||||
}, nil
|
||||
|
@ -82,7 +83,7 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.
|
|||
}
|
||||
} else {
|
||||
if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil {
|
||||
if os.IsExist(err) {
|
||||
if errors.Is(err, os.ErrExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
|
@ -151,7 +152,7 @@ func rmPath(root, src string, allowNotFound bool) error {
|
|||
}
|
||||
|
||||
if err := os.RemoveAll(p); err != nil {
|
||||
if os.IsNotExist(errors.Cause(err)) && allowNotFound {
|
||||
if errors.Is(err, os.ErrNotExist) && allowNotFound {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
|
@ -293,6 +294,7 @@ func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount,
|
|||
|
||||
return mkfile(ctx, dir, action, u, mnt.m.IdentityMapping())
|
||||
}
|
||||
|
||||
func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error {
|
||||
mnt, ok := m.(*Mount)
|
||||
if !ok {
|
||||
|
@ -308,6 +310,7 @@ func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileAc
|
|||
|
||||
return rm(ctx, dir, action)
|
||||
}
|
||||
|
||||
func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mount, action pb.FileActionCopy) error {
|
||||
mnt1, ok := m1.(*Mount)
|
||||
if !ok {
|
||||
|

22 vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go generated vendored
@ -31,6 +31,7 @@ import (
|
|||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/llbsolver"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/progress/logs"
|
||||
utilsystem "github.com/moby/buildkit/util/system"
|
||||
"github.com/moby/buildkit/worker"
|
||||
|
@ -41,7 +42,6 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const execCacheType = "buildkit.exec.v0"
|
||||
|
@ -293,7 +293,7 @@ func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string,
|
|||
if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
|
||||
logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
|
||||
return mRef, nil
|
||||
} else if errors.Cause(err) == cache.ErrLocked {
|
||||
} else if errors.Is(err, cache.ErrLocked) {
|
||||
locked = true
|
||||
}
|
||||
}
|
||||
|
@ -349,7 +349,7 @@ func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mounta
|
|||
if m.SSHOpt.Optional {
|
||||
return nil, nil
|
||||
}
|
||||
if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
|
||||
if grpcerrors.Code(err) == codes.Unimplemented {
|
||||
return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
|
||||
}
|
||||
return nil, err
|
||||
|
@ -447,7 +447,7 @@ func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mou
|
|||
|
||||
dt, err := secrets.GetSecret(ctx, caller, id)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional {
|
||||
if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
|
@ -708,6 +708,20 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
|
|||
return nil, err
|
||||
}
|
||||
|
||||
emu, err := getEmulator(e.platform, e.cm.IdentityMapping())
|
||||
if err == nil && emu != nil {
|
||||
e.op.Meta.Args = append([]string{qemuMountName}, e.op.Meta.Args...)
|
||||
|
||||
mounts = append(mounts, executor.Mount{
|
||||
Readonly: true,
|
||||
Src: emu,
|
||||
Dest: qemuMountName,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Warn(err.Error()) // TODO: remove this with pull support
|
||||
}
|
||||
|
||||
meta := executor.Meta{
|
||||
Args: e.op.Meta.Args,
|
||||
Env: e.op.Meta.Env,
|
||||
|

114 vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go generated vendored Normal file
@ -0,0 +1,114 @@
|
|||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/binfmt_misc"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
copy "github.com/tonistiigi/fsutil/copy"
|
||||
)
|
||||
|
||||
const qemuMountName = "/dev/.buildkit_qemu_emulator"
|
||||
|
||||
var qemuArchMap = map[string]string{
|
||||
"arm64": "aarch64",
|
||||
"amd64": "x86_64",
|
||||
"riscv64": "riscv64",
|
||||
"arm": "arm",
|
||||
"s390x": "s390x",
|
||||
"ppc64le": "ppc64le",
|
||||
}
|
||||
|
||||
type emulator struct {
|
||||
path string
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (e *emulator) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
||||
return &staticEmulatorMount{path: e.path, idmap: e.idmap}, nil
|
||||
}
|
||||
|
||||
type staticEmulatorMount struct {
|
||||
path string
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (m *staticEmulatorMount) Mount() ([]mount.Mount, func() error, error) {
|
||||
tmpdir, err := ioutil.TempDir("", "buildkit-qemu-emulator")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var ret bool
|
||||
defer func() {
|
||||
if !ret {
|
||||
os.RemoveAll(tmpdir)
|
||||
}
|
||||
}()
|
||||
|
||||
var uid, gid int
|
||||
if m.idmap != nil {
|
||||
root := m.idmap.RootPair()
|
||||
uid = root.UID
|
||||
gid = root.GID
|
||||
}
|
||||
if err := copy.Copy(context.TODO(), filepath.Dir(m.path), filepath.Base(m.path), tmpdir, qemuMountName, func(ci *copy.CopyInfo) {
|
||||
m := 0555
|
||||
ci.Mode = &m
|
||||
}, copy.WithChown(uid, gid)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ret = true
|
||||
return []mount.Mount{{
|
||||
Type: "bind",
|
||||
Source: filepath.Join(tmpdir, qemuMountName),
|
||||
Options: []string{"ro", "bind"},
|
||||
}}, func() error {
|
||||
return os.RemoveAll(tmpdir)
|
||||
}, nil
|
||||
|
||||
}
|
||||
func (m *staticEmulatorMount) IdentityMapping() *idtools.IdentityMapping {
|
||||
return m.idmap
|
||||
}
|
||||
|
||||
func getEmulator(p *pb.Platform, idmap *idtools.IdentityMapping) (*emulator, error) {
|
||||
all := binfmt_misc.SupportedPlatforms(false)
|
||||
m := make(map[string]struct{}, len(all))
|
||||
|
||||
for _, p := range all {
|
||||
m[p] = struct{}{}
|
||||
}
|
||||
|
||||
pp := platforms.Normalize(specs.Platform{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
Variant: p.Variant,
|
||||
})
|
||||
|
||||
if _, ok := m[platforms.Format(pp)]; ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
a, ok := qemuArchMap[pp.Architecture]
|
||||
if !ok {
|
||||
a = pp.Architecture
|
||||
}
|
||||
|
||||
fn, err := exec.LookPath("buildkit-qemu-" + a)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("no emulator available for %v", pp.OS)
|
||||
}
|
||||
|
||||
return &emulator{path: fn}, nil
|
||||
}
|

10 vendor/github.com/moby/buildkit/solver/llbsolver/solver.go generated vendored
@@ -20,7 +20,6 @@ import (
    "github.com/moby/buildkit/util/progress"
    "github.com/moby/buildkit/worker"
    digest "github.com/opencontainers/go-digest"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "golang.org/x/sync/errgroup"
)

@@ -43,7 +42,6 @@ type Solver struct {
    eachWorker                func(func(worker.Worker) error) error
    frontends                 map[string]frontend.Frontend
    resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
    platforms                 []specs.Platform
    gatewayForwarder          *controlgateway.GatewayForwarder
    sm                        *session.Manager
    entitlements              []string

@@ -61,13 +59,6 @@ func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.Cac
        entitlements: ents,
    }

    // executing is currently only allowed on default worker
    w, err := wc.GetDefault()
    if err != nil {
        return nil, err
    }
    s.platforms = w.Platforms(false)

    s.solver = solver.NewSolver(solver.SolverOpt{
        ResolveOpFunc: s.resolver(),
        DefaultCache:  cache,

@@ -93,7 +84,6 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
        eachWorker:                s.eachWorker,
        resolveCacheImporterFuncs: s.resolveCacheImporterFuncs,
        cms:                       map[string]solver.CacheManager{},
        platforms:                 s.platforms,
        sm:                        s.sm,
    }
}
|
23 vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go generated vendored
@@ -8,7 +8,6 @@ import (
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/binfmt_misc"
	"github.com/moby/buildkit/util/entitlements"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"

@@ -69,12 +68,8 @@ func WithCacheSources(cms []solver.CacheManager) LoadOpt {
	}
}

func RuntimePlatforms(p []specs.Platform) LoadOpt {
func NormalizeRuntimePlatforms() LoadOpt {
	var defaultPlatform *pb.Platform
	pp := make([]specs.Platform, len(p))
	for i := range p {
		pp[i] = platforms.Normalize(p[i])
	}
	return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
		if op.Platform == nil {
			if defaultPlatform == nil {

@@ -96,22 +91,6 @@ func RuntimePlatforms(p []specs.Platform) LoadOpt {
			Variant:      normalizedPlatform.Variant,
		}

		if _, ok := op.Op.(*pb.Op_Exec); ok {
			var found bool
			for _, pp := range pp {
				if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {
					found = true
					break
				}
			}
			if !found {
			if !binfmt_misc.Check(normalizedPlatform) {
				return errors.Errorf("runtime execution on platform %s not supported", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}))
			} else {
				pp = append(pp, normalizedPlatform)
			}
			}
		}
		return nil
	}
}
1809 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go generated vendored (file diff suppressed because it is too large)
40 vendor/github.com/moby/buildkit/solver/pb/ops.proto generated vendored
@@ -177,6 +177,42 @@ message OpMetadata {
	map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false];
}

// Source is a source mapping description for a file
message Source {
	map<string, Locations> locations = 1;
	repeated SourceInfo infos = 2;
}

// Locations is a list of ranges with a index to its source map.
message Locations {
	repeated Location locations = 1;
}

// Source info contains the shared metadata of a source mapping
message SourceInfo {
	string filename = 1;
	bytes data = 2;
	Definition definition = 3;
}

// Location defines list of areas in to source file
message Location {
	int32 sourceIndex = 1;
	repeated Range ranges = 2;
}

// Range is an area in the source file
message Range {
	Position start = 1 [(gogoproto.nullable) = false];
	Position end = 2 [(gogoproto.nullable) = false];
}

// Position is single location in a source file
message Position {
	int32 Line = 1;
	int32 Character = 2;
}

message ExportCache {
	bool Value = 1;
}

@@ -200,6 +236,8 @@ message Definition {
	// metadata contains metadata for the each of the Op messages.
	// A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future.
	map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	// Source contains the source mapping information for the vertexes in the definition
	Source Source = 3;
}

message HostIP {

@@ -302,4 +340,4 @@ message UserOpt {
message NamedUserOpt {
	string name = 1;
	int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
}
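As a rough illustration of the new source-mapping messages, the sketch below attaches a Source to a definition for one vertex digest. It assumes the Go types that gogo/protobuf would generate from the messages above; the field names (Source, Locations, SourceIndex, Ranges, Start, End) and the digest and Dockerfile contents are inferred placeholders, not taken from this diff.

package main

import (
	pb "github.com/moby/buildkit/solver/pb"
)

// attachSource records that the vertex identified by dgst came from line 3 of
// a Dockerfile, using the source-mapping messages introduced above.
func attachSource(def *pb.Definition, dgst string, dockerfile []byte) {
	def.Source = &pb.Source{
		Infos: []*pb.SourceInfo{{Filename: "Dockerfile", Data: dockerfile}},
		Locations: map[string]*pb.Locations{
			dgst: {Locations: []*pb.Location{{
				SourceIndex: 0,
				Ranges: []*pb.Range{{
					Start: pb.Position{Line: 3, Character: 0},
					End:   pb.Position{Line: 3, Character: 20},
				}},
			}}},
		},
	}
}

func main() {
	def := &pb.Definition{}
	attachSource(def, "sha256:0000000000000000000000000000000000000000000000000000000000000000", []byte("FROM busybox\n"))
	_ = def
}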
2 vendor/github.com/moby/buildkit/source/git/gitsource.go generated vendored
@@ -72,7 +72,7 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string) (target str
	for _, si := range sis {
		remoteRef, err = gs.cache.GetMutable(ctx, si.ID())
		if err != nil {
			if cache.IsLocked(err) {
			if errors.Is(err, cache.ErrLocked) {
				// should never really happen as no other function should access this metadata, but lets be graceful
				logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err)
				continue
42 vendor/github.com/moby/buildkit/util/binfmt_misc/check.go generated vendored
@@ -1,42 +0,0 @@
package binfmt_misc

import (
	"bytes"
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
)

func check(bin string) error {
	tmpdir, err := ioutil.TempDir("", "qemu-check")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)
	pp := filepath.Join(tmpdir, "check")

	r, err := gzip.NewReader(bytes.NewReader([]byte(bin)))
	if err != nil {
		return err
	}
	defer r.Close()

	f, err := os.OpenFile(pp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
	if err != nil {
		return err
	}

	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	f.Close()

	cmd := exec.Command("/check")
	withChroot(cmd, tmpdir)
	err = cmd.Run()
	return err
}
37 vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go generated vendored
@@ -3,7 +3,13 @@
package binfmt_misc

import (
	"bytes"
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"syscall"
)

@@ -12,3 +18,34 @@ func withChroot(cmd *exec.Cmd, dir string) {
		Chroot: dir,
	}
}

func check(bin string) error {
	tmpdir, err := ioutil.TempDir("", "qemu-check")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)
	pp := filepath.Join(tmpdir, "check")

	r, err := gzip.NewReader(bytes.NewReader([]byte(bin)))
	if err != nil {
		return err
	}
	defer r.Close()

	f, err := os.OpenFile(pp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
	if err != nil {
		return err
	}

	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	f.Close()

	cmd := exec.Command("/check")
	withChroot(cmd, tmpdir)
	err = cmd.Run()
	return err
}
5 vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go generated vendored
@@ -3,8 +3,13 @@
package binfmt_misc

import (
	"errors"
	"os/exec"
)

func withChroot(cmd *exec.Cmd, dir string) {
}

func check(bin string) error {
	return errors.New("binfmt is not supported on Windows")
}
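Callers elsewhere in this change (getEmulator and NormalizeRuntimePlatforms) go through the package's exported helpers rather than check directly. A hedged usage sketch follows, assuming SupportedPlatforms returns platform strings and Check takes a normalized platform and reports whether it can execute here; both signatures are inferred from the call sites in this diff, not confirmed.

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/util/binfmt_misc"
)

func main() {
	// Platforms that can run natively or through a registered binfmt handler.
	for _, p := range binfmt_misc.SupportedPlatforms(false) {
		fmt.Println("supported:", p)
	}
	// Probing a single platform unpacks and runs the embedded test binary.
	fmt.Println("linux/arm64:", binfmt_misc.Check(platforms.MustParse("linux/arm64")))
}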
2 vendor/github.com/moby/buildkit/util/entitlements/entitlements.go generated vendored
@@ -43,7 +43,7 @@ func WhiteList(allowed, supported []Entitlement) (Set, error) {
	}
	if supported != nil {
		if !supm.Allowed(e) {
			return nil, errors.Errorf("entitlement %s is not allowed", e)
			return nil, errors.Errorf("granting entitlement %s is not allowed by build daemon configuration", e)
		}
	}
	m[e] = struct{}{}
2 vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go generated vendored
@@ -35,7 +35,7 @@ func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context)
	var backoff time.Duration
	for {
		v, err = g.do(ctx, key, fn)
		if err == nil || errors.Cause(err) != errRetry {
		if err == nil || !errors.Is(err, errRetry) {
			return v, err
		}
		// backoff logic
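Several hunks in this update (flightcontrol, gitsource, resolver) replace errors.Cause and direct sentinel comparisons with errors.Is. A minimal stdlib-only illustration of why: errors.Is matches a sentinel anywhere in a %w wrap chain, where a direct comparison does not. The errRetry sentinel below is a local stand-in, not buildkit's.

package main

import (
	"errors"
	"fmt"
)

var errRetry = errors.New("retry")

func do() error {
	// Wrapping with %w keeps the sentinel reachable for errors.Is.
	return fmt.Errorf("attempt failed: %w", errRetry)
}

func main() {
	err := do()
	if err == errRetry {
		fmt.Println("direct comparison matches") // not printed: err is wrapped
	}
	if errors.Is(err, errRetry) {
		fmt.Println("errors.Is matches through the wrap chain") // printed
	}
}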
188 vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go generated vendored (new file)
@@ -0,0 +1,188 @@
package grpcerrors

import (
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/any"
	"github.com/moby/buildkit/util/stack"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type TypedError interface {
	ToProto() TypedErrorProto
}

type TypedErrorProto interface {
	proto.Message
	WrapError(error) error
}

func ToGRPC(err error) error {
	if err == nil {
		return nil
	}
	st, ok := AsGRPCStatus(err)
	if !ok || st == nil {
		st = status.New(Code(err), err.Error())
	}
	if st.Code() != Code(err) {
		pb := st.Proto()
		pb.Code = int32(Code(err))
		st = status.FromProto(pb)
	}

	var details []proto.Message

	for _, st := range stack.Traces(err) {
		details = append(details, st)
	}

	each(err, func(err error) {
		if te, ok := err.(TypedError); ok {
			details = append(details, te.ToProto())
		}
	})

	if len(details) > 0 {
		if st2, err := st.WithDetails(details...); err == nil {
			st = st2
		}
	}

	return st.Err()
}

func Code(err error) codes.Code {
	if se, ok := err.(interface {
		Code() codes.Code
	}); ok {
		return se.Code()
	}

	if se, ok := err.(interface {
		GRPCStatus() *status.Status
	}); ok {
		return se.GRPCStatus().Code()
	}

	wrapped, ok := err.(interface {
		Unwrap() error
	})
	if ok {
		return Code(wrapped.Unwrap())
	}

	return status.FromContextError(err).Code()
}

func WrapCode(err error, code codes.Code) error {
	return &withCode{error: err, code: code}
}

func AsGRPCStatus(err error) (*status.Status, bool) {
	if err == nil {
		return nil, true
	}
	if se, ok := err.(interface {
		GRPCStatus() *status.Status
	}); ok {
		return se.GRPCStatus(), true
	}

	wrapped, ok := err.(interface {
		Unwrap() error
	})
	if ok {
		return AsGRPCStatus(wrapped.Unwrap())
	}

	return nil, false
}

func FromGRPC(err error) error {
	if err == nil {
		return nil
	}
	st, ok := status.FromError(err)
	if !ok {
		return err
	}

	pb := st.Proto()

	n := &spb.Status{
		Code:    pb.Code,
		Message: pb.Message,
	}

	details := make([]TypedErrorProto, 0, len(pb.Details))
	stacks := make([]*stack.Stack, 0, len(pb.Details))

	// details that we don't understand are copied as proto
	for _, d := range pb.Details {
		var m interface{}
		detail := &ptypes.DynamicAny{}
		if err := ptypes.UnmarshalAny(d, detail); err != nil {
			detail := &gogotypes.DynamicAny{}
			if err := gogotypes.UnmarshalAny(gogoAny(d), detail); err != nil {
				n.Details = append(n.Details, d)
				continue
			}
			m = detail.Message
		} else {
			m = detail.Message
		}

		switch v := m.(type) {
		case *stack.Stack:
			stacks = append(stacks, v)
		case TypedErrorProto:
			details = append(details, v)
		default:
			n.Details = append(n.Details, d)
		}

	}

	err = status.FromProto(n).Err()

	for _, s := range stacks {
		if s != nil {
			err = stack.Wrap(err, *s)
		}
	}

	for _, d := range details {
		err = d.WrapError(err)
	}

	return stack.Enable(err)
}

type withCode struct {
	code codes.Code
	error
}

func (e *withCode) Unwrap() error {
	return e.error
}

func each(err error, fn func(error)) {
	fn(err)
	if wrapped, ok := err.(interface {
		Unwrap() error
	}); ok {
		each(wrapped.Unwrap(), fn)
	}
}

func gogoAny(in *any.Any) *gogotypes.Any {
	return &gogotypes.Any{
		TypeUrl: in.TypeUrl,
		Value:   in.Value,
	}
}
28 vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go generated vendored (new file)
@@ -0,0 +1,28 @@
package grpcerrors

import (
	"context"

	"google.golang.org/grpc"
)

func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
	resp, err = handler(ctx, req)
	if err != nil {
		err = ToGRPC(err)
	}
	return resp, err
}

func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	return ToGRPC(handler(srv, ss))
}

func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
}

func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	s, err := streamer(ctx, desc, cc, method, opts...)
	return s, ToGRPC(err)
}
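A plausible wiring sketch for the interceptors above, assuming a standard google.golang.org/grpc server and client; the loopback listener and the absence of a registered service are placeholders, not part of the vendored change.

package main

import (
	"net"

	"github.com/moby/buildkit/util/grpcerrors"
	"google.golang.org/grpc"
)

func main() {
	// Server side: handler errors pass through ToGRPC, so stack traces and
	// typed error details travel as gRPC status details.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
	)
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Serve(l)
	defer srv.Stop()

	// Client side: FromGRPC rebuilds wrapped errors from those details.
	conn, err := grpc.Dial(l.Addr().String(),
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
	)
	if err != nil {
		panic(err)
	}
	conn.Close()
}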
2 vendor/github.com/moby/buildkit/util/resolver/resolver.go generated vendored
@@ -50,7 +50,7 @@ func fillInsecureOpts(host string, c config.RegistryConfig, h *docker.RegistryHo
func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) {
	for _, d := range c.TLSConfigDir {
		fs, err := ioutil.ReadDir(d)
		if err != nil && !os.IsNotExist(err) && !os.IsPermission(err) {
		if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
			return nil, errors.WithStack(err)
		}
		for _, f := range fs {
2 vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go generated vendored
@@ -3,7 +3,7 @@ package specconv
import (
	"strings"

	"github.com/opencontainers/runtime-spec/specs-go"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// ToRootless converts spec to be compatible with "rootless" runc.
3 vendor/github.com/moby/buildkit/util/stack/generate.go generated vendored (new file)
@@ -0,0 +1,3 @@
package stack

//go:generate protoc -I=. -I=../../vendor/ --go_out=. stack.proto
151 vendor/github.com/moby/buildkit/util/stack/stack.go generated vendored (new file)
@@ -0,0 +1,151 @@
package stack

import (
	"fmt"
	io "io"
	"os"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)

var version string
var revision string

func SetVersionInfo(v, r string) {
	version = v
	revision = r
}

func Traces(err error) []*Stack {
	var st []*Stack

	wrapped, ok := err.(interface {
		Unwrap() error
	})
	if ok {
		st = Traces(wrapped.Unwrap())
	}

	if ste, ok := err.(interface {
		StackTrace() errors.StackTrace
	}); ok {
		st = append(st, convertStack(ste.StackTrace()))
	}

	if ste, ok := err.(interface {
		StackTrace() *Stack
	}); ok {
		st = append(st, ste.StackTrace())
	}

	return st
}

func Enable(err error) error {
	if err == nil {
		return nil
	}
	if !hasLocalStackTrace(err) {
		return errors.WithStack(err)
	}
	return err
}

func Wrap(err error, s Stack) error {
	return &withStack{stack: s, error: err}
}

func hasLocalStackTrace(err error) bool {
	wrapped, ok := err.(interface {
		Unwrap() error
	})
	if ok && hasLocalStackTrace(wrapped.Unwrap()) {
		return true
	}

	_, ok = err.(interface {
		StackTrace() errors.StackTrace
	})
	return ok
}

func Formatter(err error) fmt.Formatter {
	return &formatter{err}
}

type formatter struct {
	error
}

func (w *formatter) Format(s fmt.State, verb rune) {
	if w.error == nil {
		fmt.Fprintf(s, "%v", w.error)
		return
	}
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%s\n", w.Error())
			for _, stack := range Traces(w.error) {
				fmt.Fprintf(s, "%d %s %s\n", stack.Pid, stack.Version, strings.Join(stack.Cmdline, " "))
				for _, f := range stack.Frames {
					fmt.Fprintf(s, "%s\n\t%s:%d\n", f.Name, f.File, f.Line)
				}
				fmt.Fprintln(s)
			}
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}

func convertStack(s errors.StackTrace) *Stack {
	var out Stack
	for _, f := range s {
		dt, err := f.MarshalText()
		if err != nil {
			continue
		}
		p := strings.SplitN(string(dt), " ", 2)
		if len(p) != 2 {
			continue
		}
		idx := strings.LastIndexByte(p[1], ':')
		if idx == -1 {
			continue
		}
		line, err := strconv.Atoi(p[1][idx+1:])
		if err != nil {
			continue
		}
		out.Frames = append(out.Frames, &Frame{
			Name: p[0],
			File: p[1][:idx],
			Line: int32(line),
		})
	}
	out.Cmdline = os.Args
	out.Pid = int32(os.Getpid())
	out.Version = version
	out.Revision = revision
	return &out
}

type withStack struct {
	stack Stack
	error
}

func (e *withStack) Unwrap() error {
	return e.error
}

func (e *withStack) StackTrace() *Stack {
	return &e.stack
}
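A small usage sketch for the helpers above: Enable attaches a pkg/errors stack to errors that do not already carry one, and Formatter prints the frames via %+v. The version and revision strings passed to SetVersionInfo are placeholders.

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/util/stack"
)

func main() {
	stack.SetVersionInfo("v0.0.0", "unknown")
	// Enable wraps the error with a stack trace if none is present.
	err := stack.Enable(errors.New("boom"))
	// %+v on the Formatter prints the message plus frames, pid and version.
	fmt.Printf("%+v\n", stack.Formatter(err))
}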
170 vendor/github.com/moby/buildkit/util/stack/stack.pb.go generated vendored (new file)
@@ -0,0 +1,170 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: stack.proto

package stack

import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

type Stack struct {
	Frames               []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
	Cmdline              []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"`
	Pid                  int32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
	Version              string   `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
	Revision             string   `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Stack) Reset()         { *m = Stack{} }
func (m *Stack) String() string { return proto.CompactTextString(m) }
func (*Stack) ProtoMessage()    {}
func (*Stack) Descriptor() ([]byte, []int) {
	return fileDescriptor_b44c07feb2ca0a5a, []int{0}
}

func (m *Stack) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Stack.Unmarshal(m, b)
}
func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Stack.Marshal(b, m, deterministic)
}
func (m *Stack) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Stack.Merge(m, src)
}
func (m *Stack) XXX_Size() int {
	return xxx_messageInfo_Stack.Size(m)
}
func (m *Stack) XXX_DiscardUnknown() {
	xxx_messageInfo_Stack.DiscardUnknown(m)
}

var xxx_messageInfo_Stack proto.InternalMessageInfo

func (m *Stack) GetFrames() []*Frame {
	if m != nil {
		return m.Frames
	}
	return nil
}

func (m *Stack) GetCmdline() []string {
	if m != nil {
		return m.Cmdline
	}
	return nil
}

func (m *Stack) GetPid() int32 {
	if m != nil {
		return m.Pid
	}
	return 0
}

func (m *Stack) GetVersion() string {
	if m != nil {
		return m.Version
	}
	return ""
}

func (m *Stack) GetRevision() string {
	if m != nil {
		return m.Revision
	}
	return ""
}

type Frame struct {
	Name                 string   `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
	File                 string   `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"`
	Line                 int32    `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Frame) Reset()         { *m = Frame{} }
func (m *Frame) String() string { return proto.CompactTextString(m) }
func (*Frame) ProtoMessage()    {}
func (*Frame) Descriptor() ([]byte, []int) {
	return fileDescriptor_b44c07feb2ca0a5a, []int{1}
}

func (m *Frame) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Frame.Unmarshal(m, b)
}
func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Frame.Marshal(b, m, deterministic)
}
func (m *Frame) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Frame.Merge(m, src)
}
func (m *Frame) XXX_Size() int {
	return xxx_messageInfo_Frame.Size(m)
}
func (m *Frame) XXX_DiscardUnknown() {
	xxx_messageInfo_Frame.DiscardUnknown(m)
}

var xxx_messageInfo_Frame proto.InternalMessageInfo

func (m *Frame) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *Frame) GetFile() string {
	if m != nil {
		return m.File
	}
	return ""
}

func (m *Frame) GetLine() int32 {
	if m != nil {
		return m.Line
	}
	return 0
}

func init() {
	proto.RegisterType((*Stack)(nil), "stack.Stack")
	proto.RegisterType((*Frame)(nil), "stack.Frame")
}

func init() { proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) }

var fileDescriptor_b44c07feb2ca0a5a = []byte{
	// 185 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40,
	0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45,
	0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7,
	0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca,
	0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf,
	0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a,
	0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c,
	0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08,
	0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2,
	0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d,
	0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff,
	0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00,
}
17 vendor/github.com/moby/buildkit/util/stack/stack.proto generated vendored (new file)
@@ -0,0 +1,17 @@
syntax = "proto3";

package stack;

message Stack {
	repeated Frame frames = 1;
	repeated string cmdline = 2;
	int32 pid = 3;
	string version = 4;
	string revision = 5;
}

message Frame {
	string Name = 1;
	string File = 2;
	int32 Line = 3;
}