Merge pull request #42473 from thaJeztah/unfork_buildkit

revendor BuildKit (master branch)

commit 9e8cf1016e
233 changed files with 2680 additions and 4925 deletions

@@ -49,6 +49,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	bolt "go.etcd.io/bbolt"
+	"golang.org/x/sync/semaphore"
 )
 
 const labelCreatedAt = "buildkit/createdat"
@@ -189,13 +190,15 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
 // ResolveOp converts a LLB vertex into a LLB operation
 func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
 	if baseOp, ok := v.Sys().(*pb.Op); ok {
+		// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
+		var parallelism *semaphore.Weighted
 		switch op := baseOp.Op.(type) {
 		case *pb.Op_Source:
-			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
+			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
 		case *pb.Op_Exec:
-			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w)
+			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
 		case *pb.Op_File:
-			return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w)
+			return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
 		case *pb.Op_Build:
 			return ops.NewBuildOp(v, op, s, w)
 		}

@@ -24,6 +24,7 @@ import (
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/urlutil"
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
@@ -107,9 +108,8 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i
 
 }
 
-func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
+func (o *copier) createCopyInstruction(sourcesAndDest instructions.SourcesAndDest, cmdName string) (copyInstruction, error) {
 	inst := copyInstruction{cmdName: cmdName}
-	last := len(args) - 1
 
 	// Work in platform-specific filepath semantics
 	// TODO: This OS switch for paths is NOT correct and should not be supported.
@@ -118,9 +118,9 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr
 	if o.platform != nil {
 		pathOS = o.platform.OS
 	}
-	inst.dest = fromSlash(args[last], pathOS)
+	inst.dest = fromSlash(sourcesAndDest.DestPath, pathOS)
 	separator := string(separator(pathOS))
-	infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
+	infos, err := o.getCopyInfosForSourcePaths(sourcesAndDest.SourcePaths, inst.dest)
 	if err != nil {
 		return inst, errors.Wrapf(err, "%s failed", cmdName)
 	}

@@ -547,11 +547,19 @@ func TestRunIgnoresHealthcheck(t *testing.T) {
 	assert.NilError(t, err)
 
 	expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
-	cmd := &instructions.HealthCheckCommand{
-		Health: &container.HealthConfig{
-			Test: expectedTest,
-		},
-	}
+	healthint, err := instructions.ParseInstruction(&parser.Node{
+		Original: `HEALTHCHECK CMD curl -f http://localhost/ || exit 1`,
+		Value:    "healthcheck",
+		Next: &parser.Node{
+			Value: "cmd",
+			Next: &parser.Node{
+				Value: `curl -f http://localhost/ || exit 1`,
+			},
+		},
+	})
+	assert.NilError(t, err)
+	cmd := healthint.(*instructions.HealthCheckCommand)
 
 	assert.NilError(t, dispatch(sb, cmd))
 	assert.Assert(t, sb.state.runConfig.Healthcheck != nil)
 
@@ -562,12 +570,11 @@ func TestRunIgnoresHealthcheck(t *testing.T) {
 	}
 
 	sb.state.buildArgs.AddArg("one", strPtr("two"))
-	run := &instructions.RunCommand{
-		ShellDependantCmdLine: instructions.ShellDependantCmdLine{
-			CmdLine:      strslice.StrSlice{"echo foo"},
-			PrependShell: true,
-		},
-	}
+	runint, err := instructions.ParseInstruction(&parser.Node{Original: `RUN echo foo`, Value: "run"})
+	assert.NilError(t, err)
+	run := runint.(*instructions.RunCommand)
+	run.PrependShell = true
 
 	assert.NilError(t, dispatch(sb, run))
 	assert.Check(t, is.DeepEqual(expectedTest, sb.state.runConfig.Healthcheck.Test))
 }
@@ -579,24 +586,33 @@ func TestDispatchUnsupportedOptions(t *testing.T) {
 	sb.state.operatingSystem = runtime.GOOS
 
 	t.Run("ADD with chmod", func(t *testing.T) {
-		cmd := &instructions.AddCommand{SourcesAndDest: []string{".", "."}, Chmod: "0655"}
+		cmd := &instructions.AddCommand{
+			SourcesAndDest: instructions.SourcesAndDest{
+				SourcePaths: []string{"."},
+				DestPath:    ".",
+			},
+			Chmod: "0655",
+		}
 		err := dispatch(sb, cmd)
 		assert.Error(t, err, "the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled")
 	})
 
 	t.Run("COPY with chmod", func(t *testing.T) {
-		cmd := &instructions.CopyCommand{SourcesAndDest: []string{".", "."}, Chmod: "0655"}
+		cmd := &instructions.CopyCommand{
+			SourcesAndDest: instructions.SourcesAndDest{
+				SourcePaths: []string{"."},
+				DestPath:    ".",
+			},
+			Chmod: "0655",
+		}
 		err := dispatch(sb, cmd)
 		assert.Error(t, err, "the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled")
 	})
 
 	t.Run("RUN with unsupported options", func(t *testing.T) {
-		cmd := &instructions.RunCommand{
-			ShellDependantCmdLine: instructions.ShellDependantCmdLine{
-				CmdLine:      strslice.StrSlice{"echo foo"},
-				PrependShell: true,
-			},
-		}
+		runint, err := instructions.ParseInstruction(&parser.Node{Original: `RUN echo foo`, Value: "run"})
+		assert.NilError(t, err)
+		cmd := runint.(*instructions.RunCommand)
 
 		// classic builder "RUN" currently doesn't support any flags, but testing
 		// both "known" flags and "bogus" flags for completeness, and in case

@@ -32,9 +32,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "ADD multiple files to file",
 			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"file1.txt",
-				"file2.txt",
-				"test",
+				SourcePaths: []string{"file1.txt", "file2.txt"},
+				DestPath:    "test",
 			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
@@ -42,8 +41,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "Wildcard ADD multiple files to file",
 			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"file*.txt",
-				"test",
+				SourcePaths: []string{"file*.txt"},
+				DestPath:    "test",
 			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
@@ -51,9 +50,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "COPY multiple files to file",
 			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"file1.txt",
-				"file2.txt",
-				"test",
+				SourcePaths: []string{"file1.txt", "file2.txt"},
+				DestPath:    "test",
 			}},
 			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
@@ -61,9 +59,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "ADD multiple files to file with whitespace",
 			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"test file1.txt",
-				"test file2.txt",
-				"test",
+				SourcePaths: []string{"test file1.txt", "test file2.txt"},
+				DestPath:    "test",
 			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
@@ -71,9 +68,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "COPY multiple files to file with whitespace",
 			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"test file1.txt",
-				"test file2.txt",
-				"test",
+				SourcePaths: []string{"test file1.txt", "test file2.txt"},
+				DestPath:    "test",
 			}},
 			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
@@ -81,8 +77,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "COPY wildcard no files",
 			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"file*.txt",
-				"/tmp/",
+				SourcePaths: []string{"file*.txt"},
+				DestPath:    "/tmp/",
 			}},
 			expectedError: "COPY failed: no source files were specified",
 			files:         nil,
@@ -90,8 +86,8 @@ func TestDispatch(t *testing.T) {
 		{
 			name: "COPY url",
 			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
-				"https://index.docker.io/robots.txt",
-				"/",
+				SourcePaths: []string{"https://index.docker.io/robots.txt"},
+				DestPath:    "/",
 			}},
 			expectedError: "source can't be a URL for COPY",
 			files:         nil,

@@ -42,7 +42,6 @@ import (
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
-	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/moby/buildkit/util/resolver"
 	"github.com/sirupsen/logrus"
 
@@ -160,7 +159,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 	var (
 		registryKey = "docker.io"
 		mirrors     = make([]string, len(daemon.configStore.Mirrors))
-		m           = map[string]bkconfig.RegistryConfig{}
+		m           = map[string]resolver.RegistryConfig{}
 	)
 	// must trim "https://" or "http://" prefix
 	for i, v := range daemon.configStore.Mirrors {
@@ -170,11 +169,11 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 		mirrors[i] = v
 	}
 	// set mirrors for default registry
-	m[registryKey] = bkconfig.RegistryConfig{Mirrors: mirrors}
+	m[registryKey] = resolver.RegistryConfig{Mirrors: mirrors}
 
 	for _, v := range daemon.configStore.InsecureRegistries {
 		u, err := url.Parse(v)
-		c := bkconfig.RegistryConfig{}
+		c := resolver.RegistryConfig{}
 		if err == nil {
 			v = u.Host
 			t := true
@@ -198,7 +197,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 	if fis, err := ioutil.ReadDir(certsDir); err == nil {
 		for _, fi := range fis {
 			if _, ok := m[fi.Name()]; !ok {
-				m[fi.Name()] = bkconfig.RegistryConfig{
+				m[fi.Name()] = resolver.RegistryConfig{
 					TLSConfigDir: []string{filepath.Join(certsDir, fi.Name())},
 				}
 			}

vendor.conf (13 changes)
@@ -18,7 +18,7 @@ github.com/moby/sys b0f1fd7235275d01bd35cc4421e8
 github.com/creack/pty 2a38352e8b4d7ab6c336eef107e42a55e72e7fbc # v1.1.11
 github.com/sirupsen/logrus 6699a89a232f3db797f2e280639854bbc4b89725 # v1.7.0
 github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
-golang.org/x/net 6772e930b67bb09bf22262c7378e7d2f67cf59d1
+golang.org/x/net e18ecbb051101a46fc263334b127c89bc7bff7ea
 golang.org/x/sys d19ff857e887eacb631721f188c7d365c2331456
 github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
 github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
@@ -29,11 +29,11 @@ github.com/syndtr/gocapability 42c35b4376354fd554efc7ad35e0
 
 github.com/RackSec/srslog a4725f04ec91af1a91b380da679d6e0c2f061e59
 github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6721b191b0369 # v0.3.8
-golang.org/x/sync 6e8e738ad208923de99951fe0b48239bfd864f28
+golang.org/x/sync 036812b2e83c0ddf193dd5a34e034151da389d09
 
 # buildkit
-github.com/moby/buildkit 7e03277b32d4f0150bed0e081d4253b3a8557f13 https://github.com/cpuguy83/buildkit.git # v0.8.3-3-g244e8cde + libnetwork changes
-github.com/tonistiigi/fsutil 0834f99b7b85462efb69b4f571a4fa3ca7da5ac9
+github.com/moby/buildkit 9f254e18360a24c2ae47b26f772c3c89533bcbb7 # master / v0.9.0-dev
+github.com/tonistiigi/fsutil d72af97c0eaf93c1d20360e3cb9c63c223675b83
 github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go d34af3eaa63c4d08ab54863a4bdd0daa45212e12 # v1.2.0
@@ -63,7 +63,6 @@ github.com/vishvananda/netlink f049be6f391489d3f374498fe0c8
 github.com/moby/ipvs 4566ccea0e08d68e9614c3e7a64a23b850c4bb35 # v1.0.1
 github.com/urfave/cli a65b733b303f0055f8d324d805f393cd3e7a7904
 
-github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
 github.com/coreos/etcd 2c834459e1aab78a5d5219c7dfe42335fc4b617a # v3.3.25
@@ -151,8 +150,8 @@ github.com/golang/protobuf 84668698ea25b64748563aa20726
 github.com/cloudflare/cfssl 5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
 github.com/fernet/fernet-go 9eac43b88a5efb8651d24de9b68e87567e029736
 github.com/google/certificate-transparency-go 37a384cd035e722ea46e55029093e26687138edf # v1.0.20
-golang.org/x/crypto c1f2f97bffc9c53fc40a1a28a5b460094c0050d9
-golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
+golang.org/x/crypto 0c34fe9e7dc2486962ef9867e3edb3503537209f
+golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
 github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
 github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
 github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3

vendor/github.com/BurntSushi/toml/COPYING (21 deletions; generated, vendored)
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

vendor/github.com/BurntSushi/toml/README.md (218 deletions; generated, vendored)
@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
-  Age int
-  Cats []string
-  Pi float64
-  Perfection []int
-  DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-  // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
-  ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
-	Name     string
-	Duration duration
-}
-type songs struct {
-	Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-	log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
-	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
-	time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
-	var err error
-	d.Duration, err = time.ParseDuration(string(text))
-	return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-  "alpha",
-  "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title   string
-	Owner   ownerInfo
-	DB      database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-
-type ownerInfo struct {
-	Name string
-	Org  string `toml:"organization"`
-	Bio  string
-	DOB  time.Time
-}
-
-type database struct {
-	Server  string
-	Ports   []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data  [][]interface{}
-	Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.

vendor/github.com/BurntSushi/toml/decode.go (509 deletions; generated, vendored)
@@ -1,509 +0,0 @@
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("cannot write unexported field %s.%s",
|
||||
rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
|

vendor/github.com/BurntSushi/toml/decode_meta.go (121 deletions; generated, vendored)
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
-	mapping map[string]interface{}
-	types   map[string]tomlType
-	keys    []Key
-	decoded map[string]bool
-	context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-//	// access the TOML key 'a.b.c'
-//	IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
-	if len(key) == 0 {
-		return false
-	}
-
-	var hash map[string]interface{}
-	var ok bool
-	var hashOrVal interface{} = md.mapping
-	for _, k := range key {
-		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
-			return false
-		}
-		if hashOrVal, ok = hash[k]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
-	fullkey := strings.Join(key, ".")
-	if typ, ok := md.types[fullkey]; ok {
-		return typ.typeString()
-	}
-	return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
-	return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
-	var ss []string
-	for i := range k {
-		ss = append(ss, k.maybeQuoted(i))
-	}
-	return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
-	quote := false
-	for _, c := range k[i] {
-		if !isBareKeyChar(c) {
-			quote = true
-			break
-		}
-	}
-	if quote {
-		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
-	}
-	return k[i]
-}
-
-func (k Key) add(piece string) Key {
-	newKey := make(Key, len(k)+1)
-	copy(newKey, k)
-	newKey[len(k)] = piece
-	return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
-	return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
-	undecoded := make([]Key, 0, len(md.keys))
-	for _, key := range md.keys {
-		if !md.decoded[key.String()] {
-			undecoded = append(undecoded, key)
-		}
-	}
-	return undecoded
-}

vendor/github.com/BurntSushi/toml/doc.go (27 deletions; generated, vendored)
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml

vendor/github.com/BurntSushi/toml/encode.go (568 deletions; generated, vendored)
@@ -1,568 +0,0 @@
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"toml: cannot encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"toml: cannot encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"toml: cannot encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"toml: cannot encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"toml: TOML array element cannot contain a table")
|
||||
errNoKey = errors.New(
|
||||
"toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
|
||||
// io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
|
||||
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we used that.
|
||||
// Basically, this prevents the encoder for handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		if isNil(trv) {
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}

func (enc *Encoder) eTable(key Key, rv reflect.Value) {
	panicIfInvalidKey(key)
	if len(key) == 1 {
		// Output an extra newline between top-level tables.
		// (The newline isn't written if nothing else has been written though.)
		enc.newline()
	}
	if len(key) > 0 {
		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
	}
	enc.eMapOrStruct(key, rv)
}

func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
	switch rv := eindirect(rv); rv.Kind() {
	case reflect.Map:
		enc.eMap(key, rv)
	case reflect.Struct:
		enc.eStruct(key, rv)
	default:
		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
	}
}

func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}

	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}

	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}

func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with tag names as though
					// they are not anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}

			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)

	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}

			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}

			enc.encode(key.add(keyName), sf)
		}
	}
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}

// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. It is used, among other
// things, to determine whether the types of array elements are mixed (which
// is forbidden).
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}

// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}

	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}

type tagOptions struct {
	skip      bool // "-"
	name      string
	omitempty bool
	omitzero  bool
}

func getOptions(tag reflect.StructTag) tagOptions {
	t := tag.Get("toml")
	if t == "-" {
		return tagOptions{skip: true}
	}
	var opts tagOptions
	parts := strings.Split(t, ",")
	opts.name = parts[0]
	for _, s := range parts[1:] {
		switch s {
		case "omitempty":
			opts.omitempty = true
		case "omitzero":
			opts.omitzero = true
		}
	}
	return opts
}
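
A quick illustration of how getOptions interprets `toml` struct tags (a sketch, not part of the vendored file; the type and field names here are invented):

	// `toml:"-"`              -> tagOptions{skip: true}: never encoded.
	// `toml:"addr,omitempty"` -> tagOptions{name: "addr", omitempty: true}
	// `toml:",omitzero"`      -> tagOptions{name: ""}: keep the Go field
	//                            name, but drop zero numeric values.
	type server struct {
		Addr    string `toml:"addr,omitempty"`
		Retries int    `toml:",omitzero"`
		Cache   string `toml:"-"`
	}
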
func isZero(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return rv.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return rv.Float() == 0.0
	}
	return false
}

func isEmpty(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return rv.Len() == 0
	case reflect.Bool:
		return !rv.Bool()
	}
	return false
}

func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}

func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}

func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}

func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}

func encPanic(err error) {
	panic(tomlEncodeError{err})
}

func eindirect(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		return eindirect(v.Elem())
	default:
		return v
	}
}

func isNil(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return rv.IsNil()
	default:
		return false
	}
}

func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}

func isValidKeyName(s string) bool {
	return len(s) != 0
}
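
For orientation, a minimal use of this encoder through the package's public API (an illustrative sketch; the config type and values are assumptions, while NewEncoder and Encode are the package's documented entry points):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		type config struct {
			Host string            `toml:"host"`
			Tags map[string]string `toml:"tags"`
		}
		var buf bytes.Buffer
		// eStruct writes plain fields before sub-tables, so "host" is
		// emitted before the [tags] table regardless of field order.
		if err := toml.NewEncoder(&buf).Encode(config{
			Host: "localhost",
			Tags: map[string]string{"env": "dev"},
		}); err != nil {
			panic(err)
		}
		fmt.Print(buf.String())
	}
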
19 vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored
@@ -1,19 +0,0 @@
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
18 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored
@@ -1,18 +0,0 @@
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
953 vendor/github.com/BurntSushi/toml/lex.go generated vendored
@@ -1,953 +0,0 @@
package toml

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

type itemType int

const (
	itemError itemType = iota
	itemNIL   // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)

const (
	eof              = 0
	comma            = ','
	tableStart       = '['
	tableEnd         = ']'
	arrayTableStart  = '['
	arrayTableEnd    = ']'
	tableSep         = '.'
	keySep           = '='
	arrayStart       = '['
	arrayEnd         = ']'
	commentStart     = '#'
	stringStart      = '"'
	stringEnd        = '"'
	rawStringStart   = '\''
	rawStringEnd     = '\''
	inlineTableStart = '{'
	inlineTableEnd   = '}'
)

type stateFn func(lx *lexer) stateFn

type lexer struct {
	input string
	start int
	pos   int
	line  int
	state stateFn
	items chan item

	// Allow for backing up as far as three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev      int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool

	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}

type item struct {
	typ  itemType
	val  string
	line int
}

func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			lx.state = lx.state(lx)
		}
	}
}

func lex(input string) *lexer {
	lx := &lexer{
		input: input,
		state: lexTop,
		line:  1,
		items: make(chan item, 10),
		stack: make([]stateFn, 0, 10),
	}
	return lx
}
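
The lexer above is the classic state-function design: each stateFn consumes input and returns the next state, and nextItem pumps lx.state until an item lands on the buffered channel. A package-internal sketch of driving it (illustrative only; lex, nextItem, and the item types are unexported, so this works only inside package toml, and the exact item sequence is an assumption based on the states below):

	lx := lex("answer = 42\n")
	for {
		it := lx.nextItem()
		fmt.Println(it) // e.g. (KeyStart, ), (Text, answer), (Integer, 42), (EOF, )
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
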
func (lx *lexer) push(state stateFn) {
	lx.stack = append(lx.stack, state)
}

func (lx *lexer) pop() stateFn {
	if len(lx.stack) == 0 {
		return lx.errorf("BUG in lexer: no states to pop")
	}
	last := lx.stack[len(lx.stack)-1]
	lx.stack = lx.stack[0 : len(lx.stack)-1]
	return last
}

func (lx *lexer) current() string {
	return lx.input[lx.start:lx.pos]
}

func (lx *lexer) emit(typ itemType) {
	lx.items <- item{typ, lx.current(), lx.line}
	lx.start = lx.pos
}

func (lx *lexer) emitTrim(typ itemType) {
	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
	lx.start = lx.pos
}

func (lx *lexer) next() (r rune) {
	if lx.atEOF {
		panic("next called after EOF")
	}
	if lx.pos >= len(lx.input) {
		lx.atEOF = true
		return eof
	}

	if lx.input[lx.pos] == '\n' {
		lx.line++
	}
	lx.prevWidths[2] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[0]
	if lx.nprev < 3 {
		lx.nprev++
	}
	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
	lx.prevWidths[0] = w
	lx.pos += w
	return r
}

// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
	lx.start = lx.pos
}

// backup steps back one rune. It can be called at most three times between
// calls to next, since only three rune widths are saved.
func (lx *lexer) backup() {
	if lx.atEOF {
		lx.atEOF = false
		return
	}
	if lx.nprev < 1 {
		panic("backed up too far")
	}
	w := lx.prevWidths[0]
	lx.prevWidths[0] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[2]
	lx.nprev--
	lx.pos -= w
	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
		lx.line--
	}
}
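
Why three saved widths: the multiline-string states read up to three closing quotes before deciding what they saw, then un-read all of them. A worked trace of how lexMultilineString closes a `"""` delimiter (a sketch of the state below, not extra code):

	// lx.next()                             // consumed the 1st closing '"'
	// lx.accept(stringEnd)                  // 2nd '"'
	// lx.accept(stringEnd)                  // 3rd '"'
	// lx.backup(); lx.backup(); lx.backup() // un-read all three quotes...
	// lx.emit(itemMultilineString)          // ...so the value excludes them
	// lx.next(); lx.next(); lx.next()       // re-consume the delimiter
	// lx.ignore()                           // and discard it
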
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
	if lx.next() == valid {
		return true
	}
	lx.backup()
	return false
}

// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
	r := lx.next()
	lx.backup()
	return r
}

// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
	for {
		r := lx.next()
		if pred(r) {
			continue
		}
		lx.backup()
		lx.ignore()
		return
	}
}

// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}

// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}
	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		if lx.pos > lx.start {
			return lx.errorf("unexpected EOF")
		}
		lx.emit(itemEOF)
		return nil
	}

	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}

// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a newline for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		lx.emit(itemEOF)
		return nil
	}
	return lx.errorf("expected a top-level item to end with a newline, "+
		"comment, or EOF, but got %q instead", r)
}

// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
	if lx.peek() == arrayTableStart {
		lx.next()
		lx.emit(itemArrayTableStart)
		lx.push(lexArrayTableEnd)
	} else {
		lx.emit(itemTableStart)
		lx.push(lexTableEnd)
	}
	return lexTableNameStart
}

func lexTableEnd(lx *lexer) stateFn {
	lx.emit(itemTableEnd)
	return lexTopEnd
}

func lexArrayTableEnd(lx *lexer) stateFn {
	if r := lx.next(); r != arrayTableEnd {
		return lx.errorf("expected end of table array name delimiter %q, "+
			"but got %q instead", arrayTableEnd, r)
	}
	lx.emit(itemArrayTableEnd)
	return lexTopEnd
}

func lexTableNameStart(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.peek(); {
	case r == tableEnd || r == eof:
		return lx.errorf("unexpected end of table name " +
			"(table names cannot be empty)")
	case r == tableSep:
		return lx.errorf("unexpected table separator " +
			"(table names cannot be empty)")
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.push(lexTableNameEnd)
		return lexValue // reuse string lexing
	default:
		return lexBareTableName
	}
}

// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
	r := lx.next()
	if isBareKeyChar(r) {
		return lexBareTableName
	}
	lx.backup()
	lx.emit(itemText)
	return lexTableNameEnd
}

// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}

// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}

// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isBareKeyChar(r):
		return lexBareKey
	case isWhitespace(r):
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	case r == keySep:
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	default:
		return lx.errorf("bare keys cannot contain %q", r)
	}
}

// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}

// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//   x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}

// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}

	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}

// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}

// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}

// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}

// lexInlineTableValueEnd consumes everything between the end of an inline
// table key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}

// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}

// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}

// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineString
}

// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}

// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineRawString
}

// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first:
	if isNL(lx.next()) {
		return lexMultilineString
	}
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}

func lexStringEscape(lx *lexer) stateFn {
	r := lx.next()
	switch r {
	case 'b':
		fallthrough
	case 't':
		fallthrough
	case 'n':
		fallthrough
	case 'f':
		fallthrough
	case 'r':
		fallthrough
	case '"':
		fallthrough
	case '\\':
		return lx.pop()
	case 'u':
		return lexShortUnicodeEscape
	case 'U':
		return lexLongUnicodeEscape
	}
	return lx.errorf("invalid escape character %q; only the following "+
		"escape characters are allowed: "+
		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}

func lexShortUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 4; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected four hexadecimal digits after '\u', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}

func lexLongUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 8; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}

// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '_':
		return lexNumber
	case 'e', 'E':
		return lexFloat
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}
	return lx.errorf("expected a digit but got %q", r)
}

// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-':
		return lexDatetime
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

	lx.backup()
	lx.emit(itemDatetime)
	return lx.pop()
}

// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
	// We MUST see a digit. Even floats have to start with a digit.
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("floats must start with a digit, not '.'")
		}
		return lx.errorf("expected a digit but got %q", r)
	}
	return lexNumber
}

// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumber
	}
	switch r {
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}

// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
	var rs []rune
	for {
		r := lx.next()
		if !unicode.IsLetter(r) {
			lx.backup()
			break
		}
		rs = append(rs, r)
	}
	s := string(rs)
	switch s {
	case "true", "false":
		lx.emit(itemBool)
		return lx.pop()
	}
	return lx.errorf("expected value but found %q instead", s)
}

// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}

// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
	r := lx.peek()
	if isNL(r) || r == eof {
		lx.emit(itemText)
		return lx.pop()
	}
	lx.next()
	return lexComment
}

// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	return func(lx *lexer) stateFn {
		lx.ignore()
		return nextState
	}
}

// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
	return r == '\t' || r == ' '
}

func isNL(r rune) bool {
	return r == '\n' || r == '\r'
}

func isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}

func isHexadecimal(r rune) bool {
	return (r >= '0' && r <= '9') ||
		(r >= 'a' && r <= 'f') ||
		(r >= 'A' && r <= 'F')
}

func isBareKeyChar(r rune) bool {
	return (r >= 'A' && r <= 'Z') ||
		(r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9') ||
		r == '_' ||
		r == '-'
}

func (itype itemType) String() string {
	switch itype {
	case itemError:
		return "Error"
	case itemNIL:
		return "NIL"
	case itemEOF:
		return "EOF"
	case itemText:
		return "Text"
	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
		return "String"
	case itemBool:
		return "Bool"
	case itemInteger:
		return "Integer"
	case itemFloat:
		return "Float"
	case itemDatetime:
		return "DateTime"
	case itemTableStart:
		return "TableStart"
	case itemTableEnd:
		return "TableEnd"
	case itemKeyStart:
		return "KeyStart"
	case itemArray:
		return "Array"
	case itemArrayEnd:
		return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	}
	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}

func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
592 vendor/github.com/BurntSushi/toml/parse.go generated vendored
@@ -1,592 +0,0 @@
package toml

import (
	"fmt"
	"strconv"
	"strings"
	"time"
	"unicode"
	"unicode/utf8"
)

type parser struct {
	mapping map[string]interface{}
	types   map[string]tomlType
	lx      *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}

type parseError string

func (pe parseError) Error() string {
	return string(pe)
}

func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}

func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}

func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}

func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}

func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}

func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}

func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}

// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
		panic("unreachable")
	}
}

// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		if !numUnderscoresOK(it.val) {
			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
				it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			// Distinguish integer values. Normally, it'd be a bug if the lexer
			// provides an invalid integer, but it's possible that the number is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		parts := strings.FieldsFunc(it.val, func(r rune) bool {
			switch r {
			case '.', 'e', 'E':
				return true
			}
			return false
		})
		for _, part := range parts {
			if !numUnderscoresOK(part) {
				p.panicf("Invalid float %q: underscores must be "+
					"surrounded by digits", it.val)
			}
		}
		if !numPeriodsOK(it.val) {
			// As a special case, numbers like '123.' or '1.e2',
			// which are valid as far as Go/strconv are concerned,
			// must be rejected because TOML says that a fractional
			// part consists of '.' followed by 1+ digits.
			p.panicf("Invalid float %q: '.' must be followed "+
				"by one or more digits", it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseFloat(val, 64)
		if err != nil {
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.panicf("Invalid float value: %q", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		var t time.Time
		var ok bool
		var err error
		for _, format := range []string{
			"2006-01-02T15:04:05Z07:00",
			"2006-01-02T15:04:05",
			"2006-01-02",
		} {
			t, err = time.ParseInLocation(format, it.val, time.Local)
			if err == nil {
				ok = true
				break
			}
		}
		if !ok {
			p.panicf("Invalid TOML Datetime: %q.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	case itemInlineTableStart:
		var (
			hash         = make(map[string]interface{})
			outerContext = p.context
			outerKey     = p.currentKey
		)

		p.context = append(p.context, p.currentKey)
		p.currentKey = ""
		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
			if it.typ != itemKeyStart {
				p.bug("Expected key start but instead found %q, around line %d",
					it.val, p.approxLine)
			}
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			// retrieve key
			k := p.next()
			p.approxLine = k.line
			kname := p.keyString(k)

			// retrieve value
			p.currentKey = kname
			val, typ := p.value(p.next())
			// make sure we keep metadata up to date
			p.setType(kname, typ)
			p.ordered = append(p.ordered, p.context.add(p.currentKey))
			hash[kname] = val
		}
		p.context = outerContext
		p.currentKey = outerKey
		return hash, tomlHash
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}

// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
	accept := false
	for _, r := range s {
		if r == '_' {
			if !accept {
				return false
			}
			accept = false
			continue
		}
		accept = true
	}
	return accept
}

// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
	period := false
	for _, r := range s {
		if period && !isDigit(r) {
			return false
		}
		period = r == '.'
	}
	return !period
}
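
A package-internal test sketch for the two validators above (illustrative, not part of the vendored code; it assumes the standard "testing" import in a _test.go file):

	func TestNumberValidators(t *testing.T) {
		cases := []struct {
			in                   string
			underscores, periods bool
		}{
			{"1_000", true, true},   // each '_' surrounded by digits
			{"_1000", false, true},  // leading underscore rejected
			{"10__00", false, true}, // adjacent underscores rejected
			{"3.14", true, true},
			{"3.", true, false}, // '.' must be followed by a digit
		}
		for _, c := range cases {
			if got := numUnderscoresOK(c.in); got != c.underscores {
				t.Errorf("numUnderscoresOK(%q) = %v, want %v", c.in, got, c.underscores)
			}
			if got := numPeriodsOK(c.in); got != c.periods {
				t.Errorf("numPeriodsOK(%q) = %v, want %v", c.in, got, c.periods)
			}
		}
	}
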
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}

// setValue sets the given key to the given value in the current context.
// It makes sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}

// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
	keyContext := make(Key, 0, len(p.context)+1)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
	}
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	p.types[keyContext.String()] = typ
}

// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}

// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}

// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}

// current returns the full key name of the current context.
func (p *parser) current() string {
	if len(p.currentKey) == 0 {
		return p.context.String()
	}
	if len(p.context) == 0 {
		return p.currentKey
	}
	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}

func stripFirstNewline(s string) string {
	if len(s) == 0 || s[0] != '\n' {
		return s
	}
	return s[1:]
}

func stripEscapedWhitespace(s string) string {
	esc := strings.Split(s, "\\\n")
	if len(esc) > 1 {
		for i := 1; i < len(esc); i++ {
			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
		}
	}
	return strings.Join(esc, "")
}

func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}

func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
	s := string(bs)
	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
	if err != nil {
		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
			"lexer claims it's OK: %s", s, err)
	}
	if !utf8.ValidRune(rune(hex)) {
		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
	}
	return rune(hex)
}

func isStringType(ty itemType) bool {
	return ty == itemString || ty == itemMultilineString ||
		ty == itemRawString || ty == itemRawMultilineString
}
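
The whole pipeline above (lex, parse, unify) is normally reached through the package's public Decode function; a minimal sketch (the struct and input are invented, Decode's signature is the package's documented one):

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		var conf struct {
			Title string `toml:"title"`
			Port  int    `toml:"port"`
		}
		// Decode lexes and parses the input, then unifies the resulting
		// map into the target struct. 8_080 is a valid TOML integer per
		// numUnderscoresOK above.
		if _, err := toml.Decode("title = \"demo\"\nport = 8_080\n", &conf); err != nil {
			panic(err)
		}
		fmt.Println(conf.Title, conf.Port) // demo 8080
	}
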
91 vendor/github.com/BurntSushi/toml/type_check.go generated vendored
@@ -1,91 +0,0 @@
package toml

// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
	typeString() string
}

// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
	if t1 == nil || t2 == nil {
		return false
	}
	return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

type tomlBaseType string

func (btype tomlBaseType) typeString() string {
	return string(btype)
}

func (btype tomlBaseType) String() string {
	return btype.typeString()
}

var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)

// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
	switch lexItem.typ {
	case itemInteger:
		return tomlInteger
	case itemFloat:
		return tomlFloat
	case itemDatetime:
		return tomlDatetime
	case itemString:
		return tomlString
	case itemMultilineString:
		return tomlString
	case itemRawString:
		return tomlString
	case itemRawMultilineString:
		return tomlString
	case itemBool:
		return tomlBool
	}
	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
	panic("unreachable")
}

// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
	// Empty arrays are cool.
	if len(types) == 0 {
		return tomlArray
	}

	theType := types[0]
	for _, t := range types[1:] {
		if !typeEqual(theType, t) {
			p.panicf("Array contains values of type '%s' and '%s', but "+
				"arrays must be homogeneous.", theType, t)
		}
	}
	return tomlArray
}
242 vendor/github.com/BurntSushi/toml/type_fields.go (generated, vendored)

@@ -1,242 +0,0 @@
package toml

// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.

import (
	"reflect"
	"sort"
	"sync"
)

// A field represents a single field found in a struct.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}
23 vendor/github.com/moby/buildkit/README.md (generated, vendored)

@@ -60,6 +60,7 @@ You don't need to read this document unless you want to use the full-featured st
- [`--export-cache` options](#--export-cache-options)
- [`--import-cache` options](#--import-cache-options)
- [Consistent hashing](#consistent-hashing)
- [Systemd socket activation](#systemd-socket-activation)
- [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service)
- [Load balancing](#load-balancing)
- [Containerizing BuildKit](#containerizing-buildkit)

@@ -85,6 +86,7 @@ BuildKit is used by the following projects:
- [the Sanic build tool](https://github.com/distributed-containers-inc/sanic)
- [vab](https://github.com/stellarproject/vab)
- [Rio](https://github.com/rancher/rio)
- [kim](https://github.com/rancher/kim)
- [PouchContainer](https://github.com/alibaba/pouch)
- [Docker buildx](https://github.com/docker/buildx)
- [Okteto Cloud](https://okteto.com/)

@@ -126,6 +128,9 @@ By default, the OCI (runc) worker is used. You can set `--oci-worker=false --con
We are open to adding more backends.

To start the buildkitd daemon using systemd socket activation, you can install the buildkit systemd unit files.
See [Systemd socket activation](#systemd-socket-activation)

The buildkitd daemon listens for gRPC API requests on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets.
See [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service).

@@ -370,6 +375,24 @@ consider client-side load balancing using consistent hashing.
See [`./examples/kubernetes/consistenthash`](./examples/kubernetes/consistenthash).

## Metadata

To output build metadata such as the image digest, pass the `--metadata-file` flag.
The metadata will be written as a JSON object to the specified file.
The directory of the specified file must already exist and be writable.

```
buildctl build ... --metadata-file metadata.json
```

```
{"containerimage.digest": "sha256:ea0cfb27fd41ea0405d3095880c1efa45710f5bcdddb7d7d5a7317ad4825ae14",...}
```

## Systemd socket activation

On systemd-based systems, you can communicate with the daemon via [systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html): use `buildkitd --addr fd://`.
You can find examples of using systemd socket activation with BuildKit and systemd in [`./examples/systemd`](./examples/systemd).

## Expose BuildKit as a TCP service

The `buildkitd` daemon can listen for the gRPC API on a TCP socket.
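Editor's note: for scripting against the metadata file described above, a minimal Go sketch that reads the digest back out of `metadata.json`. The file name and the `containerimage.digest` key follow the README example; everything else here is an assumption, not part of the vendored change.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Read the file written by `buildctl build ... --metadata-file metadata.json`.
	dt, err := os.ReadFile("metadata.json")
	if err != nil {
		panic(err)
	}
	var md map[string]interface{}
	if err := json.Unmarshal(dt, &md); err != nil {
		panic(err)
	}
	// Print the image digest, e.g. "sha256:ea0c...".
	fmt.Println(md["containerimage.digest"])
}
```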
92 vendor/github.com/moby/buildkit/api/services/control/control.pb.go (generated, vendored)
@@ -3233,10 +3233,7 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -3319,10 +3316,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -3407,10 +3401,7 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -3756,10 +3747,7 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -4020,7 +4008,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -4211,7 +4199,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -4405,7 +4393,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -4422,10 +4410,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -4650,7 +4635,7 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -4735,10 +4720,7 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -4931,7 +4913,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -4948,10 +4930,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -5112,7 +5091,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > postIndex {

@@ -5129,10 +5108,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -5215,10 +5191,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -5371,10 +5344,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -5645,10 +5615,7 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -5938,10 +5905,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -6110,10 +6074,7 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -6198,10 +6159,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -6284,10 +6242,7 @@ func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {

@@ -6372,10 +6327,7 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthControl
	}
	if (iNdEx + skippy) > l {
3 vendor/github.com/moby/buildkit/api/services/control/control.proto (generated, vendored)
@@ -2,9 +2,6 @@ syntax = "proto3";

package moby.buildkit.v1;

// The control API is currently considered experimental and may break in a backwards
// incompatible way.

import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
12 vendor/github.com/moby/buildkit/api/types/worker.pb.go (generated, vendored)
@@ -595,7 +595,7 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthWorker
	}
	if (iNdEx + skippy) > postIndex {

@@ -680,10 +680,7 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthWorker
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthWorker
	}
	if (iNdEx + skippy) > l {

@@ -824,10 +821,7 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthWorker
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthWorker
	}
	if (iNdEx + skippy) > l {
10 vendor/github.com/moby/buildkit/cache/blobs.go (generated, vendored)
@@ -21,10 +21,6 @@ var g flightcontrol.Group

const containerdUncompressed = "containerd.io/uncompressed"

type CompareWithParent interface {
	CompareWithParent(ctx context.Context, ref string, opts ...diff.Opt) (ocispec.Descriptor, error)
}

var ErrNoBlobs = errors.Errorf("no blobs for snapshot")

// computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If

@@ -77,12 +73,6 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
	var descr ocispec.Descriptor
	var err error

	if pc, ok := sr.cm.Differ.(CompareWithParent); ok {
		descr, err = pc.CompareWithParent(ctx, sr.ID(), diff.WithMediaType(mediaType))
		if err != nil {
			return nil, err
		}
	}
	if descr.Digest == "" {
		// reference needs to be committed
		var lower []mount.Mount
225 vendor/github.com/moby/buildkit/cache/contenthash/checksum.go (generated, vendored)
@@ -8,8 +8,10 @@ import (
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"

	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	iradix "github.com/hashicorp/go-immutable-radix"
	"github.com/hashicorp/golang-lru/simplelru"

@@ -45,12 +47,15 @@ func getDefaultManager() *cacheManager {
	// header, "/dir" is for contents. For the root node "" (empty string) is the
	// key for root, "/" for the root header

func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool, s session.Group) (digest.Digest, error) {
	return getDefaultManager().Checksum(ctx, ref, path, followLinks, s)
type ChecksumOpts struct {
	FollowLinks     bool
	Wildcard        bool
	IncludePatterns []string
	ExcludePatterns []string
}

func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool, s session.Group) (digest.Digest, error) {
	return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks, s)
func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, opts ChecksumOpts, s session.Group) (digest.Digest, error) {
	return getDefaultManager().Checksum(ctx, ref, path, opts, s)
}

func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
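Editor's note: the hunk above collapses the old `Checksum`/`ChecksumWildcard` pair into one entry point driven by `ChecksumOpts`. A rough caller-side sketch of the new signature follows; the `ref` and `sess` values are placeholders for whatever the worker already has on hand, and the pattern strings are illustrative only.

```go
// checksumSources demonstrates the consolidated contenthash API
// introduced by this change; it is a sketch, not code from the PR.
func checksumSources(ctx context.Context, ref cache.ImmutableRef, sess session.Group) error {
	dgst, err := contenthash.Checksum(ctx, ref, "src/**", contenthash.ChecksumOpts{
		Wildcard:        true,
		FollowLinks:     true,
		IncludePatterns: []string{"*.go"},
		ExcludePatterns: []string{"vendor"},
	}, sess)
	if err != nil {
		return err
	}
	fmt.Println("content digest:", dgst)
	return nil
}
```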
@@ -66,8 +71,7 @@ func ClearCacheContext(md *metadata.StorageItem) {
}

type CacheContext interface {
	Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error)
	ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error)
	Checksum(ctx context.Context, ref cache.Mountable, p string, opts ChecksumOpts, s session.Group) (digest.Digest, error)
	HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
}

@@ -75,7 +79,7 @@ type Hashed interface {
	Digest() digest.Digest
}

type Wildcard struct {
type IncludedPath struct {
	Path   string
	Record *CacheRecord
}

@@ -86,20 +90,12 @@ type cacheManager struct {
	lruMu sync.Mutex
}

func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool, s session.Group) (digest.Digest, error) {
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, opts ChecksumOpts, s session.Group) (digest.Digest, error) {
	cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
	if err != nil {
		return "", nil
	}
	return cc.Checksum(ctx, ref, p, followLinks, s)
}

func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool, s session.Group) (digest.Digest, error) {
	cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
	if err != nil {
		return "", nil
	}
	return cc.ChecksumWildcard(ctx, ref, p, followLinks, s)
	return cc.Checksum(ctx, ref, p, opts, s)
}

func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {

@@ -264,12 +260,17 @@ func (cc *cacheContext) save() error {
	return cc.md.SetExternal(keyContentHash, dt)
}

// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
func keyPath(p string) string {
	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	}
	return p
}

// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
	p = keyPath(p)
	k := convertPathToKey([]byte(p))

	deleteDir := func(cr *CacheRecord) {

@@ -382,36 +383,40 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
	return nil
}

func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) {
func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, opts ChecksumOpts, s session.Group) (digest.Digest, error) {
	m := &mount{mountable: mountable, session: s}
	defer m.clean()

	wildcards, err := cc.wildcards(ctx, m, p)
	if !opts.Wildcard && len(opts.IncludePatterns) == 0 && len(opts.ExcludePatterns) == 0 {
		return cc.checksumFollow(ctx, m, p, opts.FollowLinks)
	}

	includedPaths, err := cc.includedPaths(ctx, m, p, opts)
	if err != nil {
		return "", err
	}

	if followLinks {
		for i, w := range wildcards {
	if opts.FollowLinks {
		for i, w := range includedPaths {
			if w.Record.Type == CacheRecordTypeSymlink {
				dgst, err := cc.checksumFollow(ctx, m, w.Path, followLinks)
				dgst, err := cc.checksumFollow(ctx, m, w.Path, opts.FollowLinks)
				if err != nil {
					return "", err
				}
				wildcards[i].Record = &CacheRecord{Digest: dgst}
				includedPaths[i].Record = &CacheRecord{Digest: dgst}
			}
		}
	}
	if len(wildcards) == 0 {
	if len(includedPaths) == 0 {
		return digest.FromBytes([]byte{}), nil
	}

	if len(wildcards) == 1 && path.Base(p) == path.Base(wildcards[0].Path) {
		return wildcards[0].Record.Digest, nil
	if len(includedPaths) == 1 && path.Base(p) == path.Base(includedPaths[0].Path) {
		return includedPaths[0].Record.Digest, nil
	}

	digester := digest.Canonical.Digester()
	for i, w := range wildcards {
	for i, w := range includedPaths {
		if i != 0 {
			digester.Hash().Write([]byte{0})
		}

@@ -421,13 +426,6 @@ func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mo
	return digester.Digest(), nil
}

func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) {
	m := &mount{mountable: mountable, session: s}
	defer m.clean()

	return cc.checksumFollow(ctx, m, p, followLinks)
}

func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
	const maxSymlinkLimit = 255
	i := 0

@@ -452,7 +450,7 @@ func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string,
	}
}

func (cc *cacheContext) wildcards(ctx context.Context, m *mount, p string) ([]*Wildcard, error) {
func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*IncludedPath, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

@@ -478,32 +476,103 @@ func (cc *cacheContext) wildcards(ctx context.Context, m *mount, p string) ([]*W
	}
	}()

	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	endsInSep := len(p) != 0 && p[len(p)-1] == filepath.Separator
	p = keyPath(p)

	var includePatternMatcher *fileutils.PatternMatcher
	if len(opts.IncludePatterns) != 0 {
		rootedIncludePatterns := make([]string, len(opts.IncludePatterns))
		for i, includePattern := range opts.IncludePatterns {
			rootedIncludePatterns[i] = keyPath(includePattern)
		}
		includePatternMatcher, err = fileutils.NewPatternMatcher(rootedIncludePatterns)
		if err != nil {
			return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns)
		}
	}

	wildcards := make([]*Wildcard, 0, 2)
	var excludePatternMatcher *fileutils.PatternMatcher
	if len(opts.ExcludePatterns) != 0 {
		rootedExcludePatterns := make([]string, len(opts.ExcludePatterns))
		for i, excludePattern := range opts.ExcludePatterns {
			rootedExcludePatterns[i] = keyPath(excludePattern)
		}
		excludePatternMatcher, err = fileutils.NewPatternMatcher(rootedExcludePatterns)
		if err != nil {
			return nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns)
		}
	}

	includedPaths := make([]*IncludedPath, 0, 2)

	txn := cc.tree.Txn()
	root = txn.Root()
	var updated bool
	var (
		updated bool
		iter    *iradix.Seeker
		k       []byte
		kOk     bool
	)

	iter := root.Seek([]byte{})
	for {
		k, _, ok := iter.Next()
		if !ok {
			break
	if opts.Wildcard {
		iter = root.Seek([]byte{})
		k, _, kOk = iter.Next()
	} else {
		k = convertPathToKey([]byte(p))
		if _, kOk = root.Get(k); kOk {
			iter = root.Seek(k)
		}
	}

	var (
		parentDirHeaders []*IncludedPath
		lastMatchedDir   string
	)

	for kOk {
		fn := string(convertKeyToPath(k))

		for len(parentDirHeaders) != 0 {
			lastParentDir := parentDirHeaders[len(parentDirHeaders)-1]
			if strings.HasPrefix(fn, lastParentDir.Path+"/") {
				break
			}
			parentDirHeaders = parentDirHeaders[:len(parentDirHeaders)-1]
		}

		dirHeader := false
		if len(k) > 0 && k[len(k)-1] == byte(0) {
			dirHeader = true
			fn = fn[:len(fn)-1]
			if fn == p && endsInSep {
				// We don't include the metadata header for a source dir which ends with a separator
				k, _, kOk = iter.Next()
				continue
			}
		}
		if opts.Wildcard {
			if lastMatchedDir == "" || !strings.HasPrefix(fn, lastMatchedDir+"/") {
				include, err := path.Match(p, fn)
				if err != nil {
					return nil, err
				}
				if !include {
					k, _, kOk = iter.Next()
					continue
				}
				lastMatchedDir = fn
			}
		} else if !strings.HasPrefix(fn+"/", p+"/") {
			k, _, kOk = iter.Next()
			continue
		}
		fn := convertKeyToPath(k)
		b, err := path.Match(p, string(fn))

		shouldInclude, err := shouldIncludePath(p, fn, includePatternMatcher, excludePatternMatcher)
		if err != nil {
			return nil, err
		}
		if !b {
		if !shouldInclude && !dirHeader {
			k, _, kOk = iter.Next()
			continue
		}

@@ -515,24 +584,64 @@ func (cc *cacheContext) wildcards(ctx context.Context, m *mount, p string) ([]*W
		updated = true
	}

	wildcards = append(wildcards, &Wildcard{Path: string(fn), Record: cr})

	if cr.Type == CacheRecordTypeDir {
		iter = root.Seek(append(k, 0, 0xff))
		// We only hash dir headers and files, not dir contents. Hashing
		// dir contents could be wrong if there are exclusions within the
		// dir.
		shouldInclude = false
	}

	if !shouldInclude {
		if cr.Type == CacheRecordTypeDirHeader {
			// We keep track of non-included parent dir headers in case an
			// include pattern matches a file inside one of these dirs.
			parentDirHeaders = append(parentDirHeaders, &IncludedPath{Path: fn, Record: cr})
		}
	} else {
		includedPaths = append(includedPaths, parentDirHeaders...)
		parentDirHeaders = nil
		includedPaths = append(includedPaths, &IncludedPath{Path: fn, Record: cr})
	}
	k, _, kOk = iter.Next()
	}

	cc.tree = txn.Commit()
	cc.dirty = updated

	return wildcards, nil
	return includedPaths, nil
}

func shouldIncludePath(
	p string,
	candidate string,
	includePatternMatcher *fileutils.PatternMatcher,
	excludePatternMatcher *fileutils.PatternMatcher,
) (bool, error) {
	if includePatternMatcher != nil {
		m, err := includePatternMatcher.Matches(filepath.FromSlash(candidate))
		if err != nil {
			return false, errors.Wrap(err, "failed to match includepatterns")
		}
		if !m {
			return false, nil
		}
	}

	if excludePatternMatcher != nil {
		m, err := excludePatternMatcher.Matches(filepath.FromSlash(candidate))
		if err != nil {
			return false, errors.Wrap(err, "failed to match excludepatterns")
		}
		if m {
			return false, nil
		}
	}

	return true, nil
}

func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	}
	p = keyPath(p)

	cc.mu.RLock()
	if cc.txn == nil {
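Editor's note: `shouldIncludePath` above delegates both pattern lists to docker's `fileutils.PatternMatcher`. A standalone sketch of that matching behaviour, assuming the dockerignore-style semantics of that package; the paths and patterns are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Include any .go file under /src, but exclude the vendor tree.
	include, _ := fileutils.NewPatternMatcher([]string{"/src/**/*.go"})
	exclude, _ := fileutils.NewPatternMatcher([]string{"/src/vendor"})

	for _, p := range []string{"/src/main.go", "/src/vendor/lib.go", "/docs/readme.md"} {
		in, _ := include.Matches(p)
		ex, _ := exclude.Matches(p)
		fmt.Printf("%-20s included=%v\n", p, in && !ex)
	}
}
```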
15 vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go (generated, vendored)
@@ -552,10 +552,7 @@ func (m *CacheRecord) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) > l {

@@ -673,10 +670,7 @@ func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) > l {

@@ -760,10 +754,7 @@ func (m *CacheRecords) Unmarshal(dAtA []byte) error {
	if err != nil {
		return err
	}
	if skippy < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) < 0 {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return ErrInvalidLengthChecksum
	}
	if (iNdEx + skippy) > l {
53 vendor/github.com/moby/buildkit/cache/manager.go (generated, vendored)
@@ -150,11 +150,14 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor,
		return nil, err
	}

	if len(sis) > 0 {
		ref, err := cm.get(ctx, sis[0].ID(), opts...)
	for _, si := range sis {
		ref, err := cm.get(ctx, si.ID(), opts...)
		if err != nil && !IsNotFound(err) {
			return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", sis[0].ID())
		}
		if ref == nil {
			continue
		}
		if p != nil {
			releaseParent = true
		}

@@ -170,12 +173,15 @@
	}

	var link ImmutableRef
	if len(sis) > 0 {
		ref, err := cm.get(ctx, sis[0].ID(), opts...)
	for _, si := range sis {
		ref, err := cm.get(ctx, si.ID(), opts...)
		if err != nil && !IsNotFound(err) {
			return nil, errors.Wrapf(err, "failed to get record %s by chainid", sis[0].ID())
		}
		link = ref
		if ref != nil {
			link = ref
			break
		}
	}

	id := identity.NewID()

@@ -513,7 +519,16 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr
		return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", id)
	}

	if err := cm.Snapshotter.Prepare(ctx, id, parentSnapshotID); err != nil {
	if cm.Snapshotter.Name() == "stargz" && parent != nil {
		if rerr := parent.withRemoteSnapshotLabelsStargzMode(ctx, sess, func() {
			err = cm.Snapshotter.Prepare(ctx, id, parentSnapshotID)
		}); rerr != nil {
			return nil, rerr
		}
	} else {
		err = cm.Snapshotter.Prepare(ctx, id, parentSnapshotID)
	}
	if err != nil {
		return nil, errors.Wrapf(err, "failed to prepare %s", id)
	}

@@ -756,6 +771,22 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
	return nil
	}

	// calculate sizes here so that lock does not need to be held for slow process
	for _, cr := range toDelete {
		size := getSize(cr.md)

		if size == sizeUnknown && cr.equalImmutable != nil {
			size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc
		}
		if size == sizeUnknown {
			// calling size will warm cache for next call
			if _, err := cr.Size(ctx); err != nil {
				return err
			}
		}
	}

	cm.mu.Lock()
	var err error
	for _, cr := range toDelete {
		cr.mu.Lock()

@@ -779,15 +810,6 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
	if c.Size == sizeUnknown && cr.equalImmutable != nil {
		c.Size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc
	}
	if c.Size == sizeUnknown {
		cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead
		s, err := cr.Size(ctx)
		if err != nil {
			return err
		}
		c.Size = s
		cr.mu.Lock()
	}

	opt.totalSize -= c.Size

@@ -805,6 +827,7 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
	}
	cr.mu.Unlock()
	}
	cm.mu.Unlock()
	if err != nil {
		return err
	}
42 vendor/github.com/moby/buildkit/cache/metadata/metadata.go (generated, vendored)
@@ -205,10 +205,11 @@ func (s *Store) Close() error {

type StorageItem struct {
	id      string
	vmu     sync.RWMutex
	values  map[string]*Value
	qmu     sync.Mutex
	queue   []func(*bolt.Bucket) error
	storage *Store
	mu      sync.RWMutex
}

func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) {

@@ -242,10 +243,6 @@ func (s *StorageItem) ID() string {
	return s.id
}

func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error {
	return s.storage.View(s.id, fn)
}

func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error {
	return s.storage.Update(s.id, fn)
}

@@ -255,17 +252,19 @@ func (s *StorageItem) Metadata() *StorageItem {
}

func (s *StorageItem) Keys() []string {
	s.vmu.RLock()
	keys := make([]string, 0, len(s.values))
	for k := range s.values {
		keys = append(keys, k)
	}
	s.vmu.RUnlock()
	return keys
}

func (s *StorageItem) Get(k string) *Value {
	s.mu.RLock()
	s.vmu.RLock()
	v := s.values[k]
	s.mu.RUnlock()
	s.vmu.RUnlock()
	return v
}

@@ -280,10 +279,13 @@ func (s *StorageItem) GetExternal(k string) ([]byte, error) {
	if b == nil {
		return errors.WithStack(errNotFound)
	}
	dt = b.Get([]byte(k))
	if dt == nil {
	dt2 := b.Get([]byte(k))
	if dt2 == nil {
		return errors.WithStack(errNotFound)
	}
	// data needs to be copied as boltdb can reuse the buffer after View returns
	dt = make([]byte, len(dt2))
	copy(dt, dt2)
	return nil
	})
	if err != nil {
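Editor's note: the `GetExternal` fix above copies the value out of the bucket because bbolt may reuse the backing buffer once the `View` transaction returns. The same pattern in isolation, as a minimal sketch; the bucket and key names are made up.

```go
package main

import (
	"errors"

	bolt "go.etcd.io/bbolt"
)

// readExternal returns a copy of a value from a bbolt bucket. The slice
// handed back by Bucket.Get is only valid while the transaction is open,
// so it must be copied before View returns.
func readExternal(db *bolt.DB, bucket, key string) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(bucket))
		if b == nil {
			return errors.New("bucket not found")
		}
		v := b.Get([]byte(key))
		if v == nil {
			return errors.New("key not found")
		}
		out = make([]byte, len(v)) // copy: v is invalid after View returns
		copy(out, v)
		return nil
	})
	return out, err
}

func main() {}
```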
@@ -307,14 +309,14 @@ func (s *StorageItem) SetExternal(k string, dt []byte) error {
}

func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.qmu.Lock()
	defer s.qmu.Unlock()
	s.queue = append(s.queue, fn)
}

func (s *StorageItem) Commit() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.qmu.Lock()
	defer s.qmu.Unlock()
	return errors.WithStack(s.Update(func(b *bolt.Bucket) error {
		for _, fn := range s.queue {
			if err := fn(b); err != nil {

@@ -327,15 +329,23 @@
	}

func (s *StorageItem) Indexes() (out []string) {
	s.vmu.RLock()
	for _, v := range s.values {
		if v.Index != "" {
			out = append(out, v.Index)
		}
	}
	s.vmu.RUnlock()
	return
}

func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
	s.vmu.Lock()
	defer s.vmu.Unlock()
	return s.setValue(b, key, v)
}

func (s *StorageItem) setValue(b *bolt.Bucket, key string, v *Value) error {
	if v == nil {
		if old, ok := s.values[key]; ok {
			if old.Index != "" {

@@ -375,16 +385,16 @@ func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
	var ErrSkipSetValue = errors.New("skip setting metadata value")

func (s *StorageItem) GetAndSetValue(key string, fn func(*Value) (*Value, error)) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.Update(func(b *bolt.Bucket) error {
		s.vmu.Lock()
		defer s.vmu.Unlock()
		v, err := fn(s.values[key])
		if errors.Is(err, ErrSkipSetValue) {
			return nil
		} else if err != nil {
			return err
		}
		return s.SetValue(b, key, v)
		return s.setValue(b, key, v)
	})
}
201 vendor/github.com/moby/buildkit/cache/refs.go (generated, vendored)
@@ -155,8 +155,18 @@ func (cr *cacheRecord) isLazy(ctx context.Context) (bool, error) {
	_, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst))
	if errors.Is(err, errdefs.ErrNotFound) {
		return true, nil
	} else if err != nil {
		return false, err
	}
	return false, err

	// If the snapshot is a remote snapshot, this layer is lazy.
	if info, err := cr.cm.Snapshotter.Stat(ctx, getSnapshotID(cr.md)); err == nil {
		if _, ok := info.Labels["containerd.io/snapshot/remote"]; ok {
			return true, nil
		}
	}

	return false, nil
}

func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping {

@@ -381,6 +391,20 @@ func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Grou

	sr.mu.Lock()
	defer sr.mu.Unlock()

	if sr.cm.Snapshotter.Name() == "stargz" {
		var (
			m    snapshot.Mountable
			rerr error
		)
		if err := sr.withRemoteSnapshotLabelsStargzMode(ctx, s, func() {
			m, rerr = sr.mount(ctx, readonly)
		}); err != nil {
			return nil, err
		}
		return m, rerr
	}

	return sr.mount(ctx, readonly)
}

@@ -400,66 +424,154 @@ func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr erro
	}

	if sr.cm.Snapshotter.Name() == "stargz" {
		if _, err := sr.prepareRemoteSnapshots(ctx, sr.descHandlers); err != nil {
		if err := sr.withRemoteSnapshotLabelsStargzMode(ctx, s, func() {
			if rerr = sr.prepareRemoteSnapshotsStargzMode(ctx, s); rerr != nil {
				return
			}
			rerr = sr.extract(ctx, sr.descHandlers, s)
		}); err != nil {
			return err
		}
		return rerr
	}

	return sr.extract(ctx, sr.descHandlers, s)
}

func (sr *immutableRef) prepareRemoteSnapshots(ctx context.Context, dhs DescHandlers) (bool, error) {
	ok, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) {
		snapshotID := getSnapshotID(sr.md)
		if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil {
			return true, nil
func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, s session.Group, f func()) error {
	dhs := sr.descHandlers
	for _, r := range sr.parentRefChain() {
		r := r
		info, err := r.cm.Snapshotter.Stat(ctx, getSnapshotID(r.md))
		if err != nil && !errdefs.IsNotFound(err) {
			return err
		} else if errdefs.IsNotFound(err) {
			continue // This snapshot doesn't exist; skip
		} else if _, ok := info.Labels["containerd.io/snapshot/remote"]; !ok {
			continue // This isn't a remote snapshot; skip
		}
		desc, err := sr.ociDesc()
		desc, err := r.ociDesc()
		if err != nil {
			return false, err
			return err
		}
		dh := dhs[desc.Digest]
		if dh == nil {
			return false, nil
			continue // no info passed; skip
		}

		parentID := ""
		if sr.parent != nil {
			if ok, err := sr.parent.prepareRemoteSnapshots(ctx, dhs); !ok {
				return false, err
		// Append temporary labels (based on dh.SnapshotLabels) as hints for remote snapshots.
		// For avoiding collision among calls, keys of these tmp labels contain a unique ID.
		flds, labels := makeTmpLabelsStargzMode(snapshots.FilterInheritedLabels(dh.SnapshotLabels), s)
		info.Labels = labels
		if _, err := r.cm.Snapshotter.Update(ctx, info, flds...); err != nil {
			return errors.Wrapf(err, "failed to add tmp remote labels for remote snapshot")
		}
		defer func() {
			for k := range info.Labels {
				info.Labels[k] = "" // Remove labels appended in this call
			}
			parentID = getSnapshotID(sr.parent.md)
		}
			if _, err := r.cm.Snapshotter.Update(ctx, info, flds...); err != nil {
				logrus.Warn(errors.Wrapf(err, "failed to remove tmp remote labels"))
			}
		}()

		// Hint labels to the snapshotter
		labels := dh.SnapshotLabels
		if labels == nil {
			labels = make(map[string]string)
		}
		labels["containerd.io/snapshot.ref"] = snapshotID
		opt := snapshots.WithLabels(labels)
		continue
	}

	// Try to prepare the remote snapshot
	key := fmt.Sprintf("tmp-%s %s", identity.NewID(), sr.Info().ChainID)
	if err = sr.cm.Snapshotter.Prepare(ctx, key, parentID, opt); err != nil {
		if errdefs.IsAlreadyExists(err) {
			// Check if the targeting snapshot ID has been prepared as a remote
			// snapshot in the snapshotter.
			if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil {
				// We can use this remote snapshot without unlazying.
				// Try the next layer as well.
				return true, nil
	f()

	return nil
}

func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error {
	_, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) {
		dhs := sr.descHandlers
		for _, r := range sr.parentRefChain() {
			r := r
			snapshotID := getSnapshotID(r.md)
			if _, err := r.cm.Snapshotter.Stat(ctx, snapshotID); err == nil {
				continue
			}

			desc, err := r.ociDesc()
			if err != nil {
				return nil, err
			}
			dh := dhs[desc.Digest]
			if dh == nil {
				// We cannot prepare remote snapshots without descHandler.
				return nil, nil
			}

			// tmpLabels contains dh.SnapshotLabels + session IDs. All keys contain
			// a unique ID for avoiding the collision among snapshotter API calls to
			// this snapshot. tmpLabels will be removed at the end of this function.
			defaultLabels := snapshots.FilterInheritedLabels(dh.SnapshotLabels)
			if defaultLabels == nil {
				defaultLabels = make(map[string]string)
			}
			tmpFields, tmpLabels := makeTmpLabelsStargzMode(defaultLabels, s)
			defaultLabels["containerd.io/snapshot.ref"] = snapshotID

			// Prepare remote snapshots
			var (
				key  = fmt.Sprintf("tmp-%s %s", identity.NewID(), r.Info().ChainID)
				opts = []snapshots.Opt{
					snapshots.WithLabels(defaultLabels),
					snapshots.WithLabels(tmpLabels),
				}
			)
			parentID := ""
			if r.parent != nil {
				parentID = getSnapshotID(r.parent.md)
			}
			if err = r.cm.Snapshotter.Prepare(ctx, key, parentID, opts...); err != nil {
				if errdefs.IsAlreadyExists(err) {
					// Check if the targeting snapshot ID has been prepared as
					// a remote snapshot in the snapshotter.
					info, err := r.cm.Snapshotter.Stat(ctx, snapshotID)
					if err == nil { // usable as remote snapshot without unlazying.
						defer func() {
							// Remove tmp labels appended in this func
							for k := range tmpLabels {
								info.Labels[k] = ""
							}
							if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil {
								logrus.Warn(errors.Wrapf(err,
									"failed to remove tmp remote labels after prepare"))
							}
						}()

						// Try the next layer as well.
						continue
					}
				}
			}

			// This layer and all upper layers cannot be prepared without unlazying.
			break
		}

		// This layer cannot be prepared without unlazying.
		return false, nil
		return nil, nil
	})
	if err != nil {
		return false, err
		return err
	}

func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields []string, res map[string]string) {
	res = make(map[string]string)
	// Append unique ID to labels for avoiding collision of labels among calls
	id := identity.NewID()
	for k, v := range labels {
		tmpKey := k + "." + id
		fields = append(fields, "labels."+tmpKey)
		res[tmpKey] = v
	}
	return ok.(bool), err
	for i, sid := range session.AllSessionIDs(s) {
		sidKey := "containerd.io/snapshot/remote/stargz.session." + fmt.Sprintf("%d", i) + "." + id
		fields = append(fields, "labels."+sidKey)
		res[sidKey] = sid
	}
	return
}

func (sr *immutableRef) extract(ctx context.Context, dhs DescHandlers, s session.Group) error {

@@ -725,6 +837,19 @@ func (sr *mutableRef) Mount(ctx context.Context, readonly bool, s session.Group)
	sr.mu.Lock()
	defer sr.mu.Unlock()

	if sr.cm.Snapshotter.Name() == "stargz" && sr.parent != nil {
		var (
			m    snapshot.Mountable
			rerr error
		)
		if err := sr.parent.withRemoteSnapshotLabelsStargzMode(ctx, s, func() {
			m, rerr = sr.mount(ctx, readonly)
		}); err != nil {
			return nil, err
		}
		return m, rerr
	}

	return sr.mount(ctx, readonly)
}
33 vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go (generated, vendored)
@@ -2,6 +2,7 @@ package cacheimport

import (
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/content"

@@ -46,7 +47,9 @@ func (c *CacheChains) normalize() error {

	validated := make([]*item, 0, len(c.items))
	for _, it := range c.items {
		it.backlinksMu.Lock()
		it.validate()
		it.backlinksMu.Unlock()
	}
	for _, it := range c.items {
		if !it.invalid {

@@ -62,6 +65,8 @@
		}
	}

	st.removeLoops()

	items := make([]*item, 0, len(st.byKey))
	for _, it := range st.byKey {
		items = append(items, it)

@@ -110,9 +115,10 @@ type item struct {
	result     *solver.Remote
	resultTime time.Time

	links []map[link]struct{}
	backlinks map[*item]struct{}
	invalid bool
	links       []map[link]struct{}
	backlinksMu sync.Mutex
	backlinks   map[*item]struct{}
	invalid     bool
}

type link struct {

@@ -120,6 +126,25 @@ type link struct {
	selector string
}

func (c *item) removeLink(src *item) bool {
	found := false
	for idx := range c.links {
		for l := range c.links[idx] {
			if l.src == src {
				delete(c.links[idx], l)
				found = true
			}
		}
	}
	for idx := range c.links {
		if len(c.links[idx]) == 0 {
			c.links = nil
			break
		}
	}
	return found
}

func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
	c.resultTime = createdAt
	c.result = result

@@ -139,7 +164,9 @@ func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector stri
	}

	c.links[index][link{src: src, selector: selector}] = struct{}{}
	src.backlinksMu.Lock()
	src.backlinks[c] = struct{}{}
	src.backlinksMu.Unlock()
}

func (c *item) validate() {
48 vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go (generated, vendored)
@ -8,6 +8,7 @@ import (
|
|||
"github.com/moby/buildkit/solver"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// EmptyLayerRemovalSupported defines if implementation supports removal of empty layers. Buildkit image exporter
|
||||
|
@ -130,6 +131,53 @@ type normalizeState struct {
|
|||
next int
|
||||
}
|
||||
|
||||
func (s *normalizeState) removeLoops() {
|
||||
roots := []digest.Digest{}
|
||||
for dgst, it := range s.byKey {
|
||||
if len(it.links) == 0 {
|
||||
roots = append(roots, dgst)
|
||||
}
|
||||
}
|
||||
|
||||
visited := map[digest.Digest]struct{}{}
|
||||
|
||||
for _, d := range roots {
|
||||
s.checkLoops(d, visited)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]struct{}) {
|
||||
it, ok := s.byKey[d]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
links, ok := s.links[it]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
visited[d] = struct{}{}
|
||||
defer func() {
|
||||
delete(visited, d)
|
||||
}()
|
||||
|
||||
for l, ids := range links {
|
||||
for id := range ids {
|
||||
if _, ok := visited[id]; ok {
|
||||
it2, ok := s.byKey[id]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if !it2.removeLink(it) {
|
||||
logrus.Warnf("failed to remove looping cache key %s %s", d, id)
|
||||
}
|
||||
delete(links[l], id)
|
||||
} else {
|
||||
s.checkLoops(id, visited)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeItem(it *item, state *normalizeState) (*item, error) {
|
||||
if it2, ok := state.added[it]; ok {
|
||||
return it2, nil
|
||||
|
|
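The removeLoops/checkLoops pair added above is a depth-first walk that drops any link pointing back to a node still on the DFS stack. A minimal standalone sketch of the same idea, using toy types rather than buildkit's item/link structures:

package main

import "fmt"

// graph maps a node to the set of nodes it links to.
type graph map[string]map[string]struct{}

// breakLoops walks from each root and deletes any edge that points back to a
// node currently on the DFS stack, mirroring checkLoops above.
func breakLoops(g graph, roots []string) {
	visited := map[string]struct{}{}
	var walk func(n string)
	walk = func(n string) {
		visited[n] = struct{}{}
		defer delete(visited, n)
		for m := range g[n] {
			if _, onStack := visited[m]; onStack {
				delete(g[n], m) // back-edge: drop it to break the cycle
			} else {
				walk(m)
			}
		}
	}
	for _, r := range roots {
		walk(r)
	}
}

func main() {
	g := graph{
		"a": {"b": {}},
		"b": {"a": {}}, // a <-> b is a loop
	}
	breakLoops(g, []string{"a"})
	fmt.Println(len(g["b"])) // 0: the b->a back-edge was removed
}
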
2  vendor/github.com/moby/buildkit/client/llb/exec.go  (generated, vendored)

@@ -233,6 +233,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 			HttpsProxy: p.HTTPSProxy,
 			FtpProxy:   p.FTPProxy,
 			NoProxy:    p.NoProxy,
+			AllProxy:   p.AllProxy,
 		}
 		addCap(&e.constraints, pb.CapExecMetaProxy)
 	}

@@ -645,6 +646,7 @@ type ProxyEnv struct {
 	HTTPSProxy string
 	FTPProxy   string
 	NoProxy    string
+	AllProxy   string
 }
 
 type CacheMountSharingMode int

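AllProxy rides along with the existing proxy plumbing. A usage sketch; WithProxy is the RunOption this package already uses to attach a ProxyEnv, and the image and addresses here are illustrative:

package main

import "github.com/moby/buildkit/client/llb"

func main() {
	st := llb.Image("docker.io/library/alpine:latest").Run(
		llb.Shlex("apk add --no-cache curl"),
		llb.WithProxy(llb.ProxyEnv{
			HTTPProxy: "http://proxy:3128",    // illustrative address
			AllProxy:  "socks5://proxy:1080",  // now forwarded via CapExecMetaProxy
		}),
	).Root()
	_ = st
}
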
18  vendor/github.com/moby/buildkit/client/llb/fileop.go  (generated, vendored)

@@ -56,6 +56,10 @@ type subAction interface {
 	toProtoAction(context.Context, string, pb.InputIndex) (pb.IsFileAction, error)
 }
 
+type capAdder interface {
+	addCaps(*FileOp)
+}
+
 type FileAction struct {
 	state *State
 	prev  *FileAction

@@ -427,6 +431,8 @@ type CopyInfo struct {
 	Mode                *os.FileMode
 	FollowSymlinks      bool
 	CopyDirContentsOnly bool
+	IncludePatterns     []string
+	ExcludePatterns     []string
 	AttemptUnpack       bool
 	CreateDestPath      bool
 	AllowWildcard       bool

@@ -458,6 +464,8 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base
 		Src:                src,
 		Dest:               normalizePath(parent, a.dest, true),
 		Owner:              a.info.ChownOpt.marshal(base),
+		IncludePatterns:    a.info.IncludePatterns,
+		ExcludePatterns:    a.info.ExcludePatterns,
 		AllowWildcard:      a.info.AllowWildcard,
 		AllowEmptyWildcard: a.info.AllowEmptyWildcard,
 		FollowSymlink:      a.info.FollowSymlinks,

@@ -496,6 +504,12 @@ func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) {
 	return p, nil
 }
 
+func (a *fileActionCopy) addCaps(f *FileOp) {
+	if len(a.info.IncludePatterns) != 0 || len(a.info.ExcludePatterns) != 0 {
+		addCap(&f.constraints, pb.CapFileCopyIncludeExcludePatterns)
+	}
+}
+
 type CreatedTime time.Time
 
 func WithCreatedTime(t time.Time) CreatedTime {

@@ -682,6 +696,10 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 	pop.Inputs = state.inputs
 
 	for i, st := range state.actions {
+		if adder, isCapAdder := st.action.(capAdder); isCapAdder {
+			adder.addCaps(f)
+		}
+
 		output := pb.OutputIndex(-1)
 		if i+1 == len(state.actions) {
 			output = 0

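The new CopyInfo fields plug straight into the existing llb.Copy option mechanism (a *CopyInfo is already a CopyOption, as the hunks above show), and addCaps registers the new capability automatically whenever either pattern list is non-empty. A client-side sketch; the local source name "context" is illustrative:

package main

import "github.com/moby/buildkit/client/llb"

func main() {
	// Copy only Go sources, skipping tests.
	src := llb.Local("context")
	st := llb.Scratch().File(
		llb.Copy(src, "src/", "/app/", &llb.CopyInfo{
			IncludePatterns: []string{"**/*.go"},
			ExcludePatterns: []string{"**/*_test.go"},
			CreateDestPath:  true,
		}),
	)
	_ = st
}
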
47  vendor/github.com/moby/buildkit/client/llb/source.go  (generated, vendored)

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/distribution/reference"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
+	"github.com/moby/buildkit/util/gitutil"
 	"github.com/moby/buildkit/util/sshutil"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"

@@ -198,52 +199,14 @@ type ImageInfo struct {
 	RecordType string
 }
 
-const (
-	gitProtocolHTTP = iota + 1
-	gitProtocolHTTPS
-	gitProtocolSSH
-	gitProtocolGit
-	gitProtocolUnknown
-)
-
-func getGitProtocol(remote string) (string, int) {
-	prefixes := map[string]int{
-		"http://":  gitProtocolHTTP,
-		"https://": gitProtocolHTTPS,
-		"git://":   gitProtocolGit,
-		"ssh://":   gitProtocolSSH,
-	}
-	protocolType := gitProtocolUnknown
-	for prefix, potentialType := range prefixes {
-		if strings.HasPrefix(remote, prefix) {
-			remote = strings.TrimPrefix(remote, prefix)
-			protocolType = potentialType
-		}
-	}
-
-	if protocolType == gitProtocolUnknown && sshutil.IsSSHTransport(remote) {
-		protocolType = gitProtocolSSH
-	}
-
-	// remove name from ssh
-	if protocolType == gitProtocolSSH {
-		parts := strings.SplitN(remote, "@", 2)
-		if len(parts) == 2 {
-			remote = parts[1]
-		}
-	}
-
-	return remote, protocolType
-}
-
 func Git(remote, ref string, opts ...GitOption) State {
 	url := strings.Split(remote, "#")[0]
 
 	var protocolType int
-	remote, protocolType = getGitProtocol(remote)
+	remote, protocolType = gitutil.ParseProtocol(remote)
 
 	var sshHost string
-	if protocolType == gitProtocolSSH {
+	if protocolType == gitutil.SSHProtocol {
 		parts := strings.SplitN(remote, ":", 2)
 		if len(parts) == 2 {
 			sshHost = parts[0]

@@ -251,7 +214,7 @@ func Git(remote, ref string, opts ...GitOption) State {
 			remote = parts[0] + "/" + parts[1]
 		}
 	}
-	if protocolType == gitProtocolUnknown {
+	if protocolType == gitutil.UnknownProtocol {
 		url = "https://" + url
 	}

@@ -289,7 +252,7 @@ func Git(remote, ref string, opts ...GitOption) State {
 			addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth)
 		}
 	}
-	if protocolType == gitProtocolSSH {
+	if protocolType == gitutil.SSHProtocol {
 		if gi.KnownSSHHosts != "" {
 			attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts
 		} else if sshHost != "" {

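The deleted getGitProtocol defines the behavior that gitutil.ParseProtocol is expected to preserve: strip a known scheme prefix, fall back to SSH detection for scp-style remotes, and drop the user@ part for SSH. A sketch under that assumption; gitutil.HTTPSProtocol is assumed by analogy with the SSHProtocol and UnknownProtocol constants used in this file:

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/gitutil"
)

func main() {
	// scp-style remote: detected as SSH, user name stripped
	remote, proto := gitutil.ParseProtocol("git@github.com:moby/buildkit.git")
	fmt.Println(remote, proto == gitutil.SSHProtocol) // github.com:moby/buildkit.git true

	// explicit scheme: prefix stripped, protocol recorded
	remote, proto = gitutil.ParseProtocol("https://github.com/moby/buildkit.git")
	fmt.Println(remote, proto == gitutil.HTTPSProtocol) // github.com/moby/buildkit.git true
}
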
122  vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go  (generated, vendored)

@@ -1,122 +0,0 @@
-package config
-
-import "github.com/BurntSushi/toml"
-
-// Config provides containerd configuration data for the server
-type Config struct {
-	Debug bool `toml:"debug"`
-
-	// Root is the path to a directory where buildkit will store persistent data
-	Root string `toml:"root"`
-
-	//Entitlements e.g. security.insecure, network.host
-	Entitlements []string `toml:"insecure-entitlements"`
-	// GRPC configuration settings
-	GRPC GRPCConfig `toml:"grpc"`
-
-	Workers struct {
-		OCI        OCIConfig        `toml:"oci"`
-		Containerd ContainerdConfig `toml:"containerd"`
-	} `toml:"worker"`
-
-	Registries map[string]RegistryConfig `toml:"registry"`
-
-	DNS *DNSConfig `toml:"dns"`
-}
-
-type GRPCConfig struct {
-	Address      []string `toml:"address"`
-	DebugAddress string   `toml:"debugAddress"`
-	UID          int      `toml:"uid"`
-	GID          int      `toml:"gid"`
-
-	TLS TLSConfig `toml:"tls"`
-	// MaxRecvMsgSize int `toml:"max_recv_message_size"`
-	// MaxSendMsgSize int `toml:"max_send_message_size"`
-}
-
-type RegistryConfig struct {
-	Mirrors      []string     `toml:"mirrors"`
-	PlainHTTP    *bool        `toml:"http"`
-	Insecure     *bool        `toml:"insecure"`
-	RootCAs      []string     `toml:"ca"`
-	KeyPairs     []TLSKeyPair `toml:"keypair"`
-	TLSConfigDir []string     `toml:"tlsconfigdir"`
-}
-
-type TLSKeyPair struct {
-	Key         string `toml:"key"`
-	Certificate string `toml:"cert"`
-}
-
-type TLSConfig struct {
-	Cert string `toml:"cert"`
-	Key  string `toml:"key"`
-	CA   string `toml:"ca"`
-}
-
-type GCConfig struct {
-	GC            *bool      `toml:"gc"`
-	GCKeepStorage int64      `toml:"gckeepstorage"`
-	GCPolicy      []GCPolicy `toml:"gcpolicy"`
-}
-
-type NetworkConfig struct {
-	Mode          string `toml:"networkMode"`
-	CNIConfigPath string `toml:"cniConfigPath"`
-	CNIBinaryPath string `toml:"cniBinaryPath"`
-}
-
-type OCIConfig struct {
-	Enabled          *bool             `toml:"enabled"`
-	Labels           map[string]string `toml:"labels"`
-	Platforms        []string          `toml:"platforms"`
-	Snapshotter      string            `toml:"snapshotter"`
-	Rootless         bool              `toml:"rootless"`
-	NoProcessSandbox bool              `toml:"noProcessSandbox"`
-	GCConfig
-	NetworkConfig
-	// UserRemapUnsupported is unsupported key for testing. The feature is
-	// incomplete and the intention is to make it default without config.
-	UserRemapUnsupported string `toml:"userRemapUnsupported"`
-	// For use in storing the OCI worker binary name that will replace buildkit-runc
-	Binary               string `toml:"binary"`
-	ProxySnapshotterPath string `toml:"proxySnapshotterPath"`
-
-	// StargzSnapshotterConfig is configuration for stargz snapshotter.
-	// Decoding this is delayed in order to remove the dependency from this
-	// config pkg to stargz snapshotter's config pkg.
-	StargzSnapshotterConfig toml.Primitive `toml:"stargzSnapshotter"`
-
-	// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
-	// The profile should already be loaded (by a higher level system) before creating a worker.
-	ApparmorProfile string `toml:"apparmor-profile"`
-}
-
-type ContainerdConfig struct {
-	Address   string            `toml:"address"`
-	Enabled   *bool             `toml:"enabled"`
-	Labels    map[string]string `toml:"labels"`
-	Platforms []string          `toml:"platforms"`
-	Namespace string            `toml:"namespace"`
-	GCConfig
-	NetworkConfig
-	Snapshotter string `toml:"snapshotter"`
-
-	// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
-	// The profile should already be loaded (by a higher level system) before creating a worker.
-	ApparmorProfile string `toml:"apparmor-profile"`
-}
-
-type GCPolicy struct {
-	All          bool     `toml:"all"`
-	KeepBytes    int64    `toml:"keepBytes"`
-	KeepDuration int64    `toml:"keepDuration"`
-	Filters      []string `toml:"filters"`
-}
-
-type DNSConfig struct {
-	Nameservers   []string `toml:"nameservers"`
-	Options       []string `toml:"options"`
-	SearchDomains []string `toml:"searchDomains"`
-}

vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go
generated
vendored
31
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go
generated
vendored
|
@ -1,31 +0,0 @@
|
|||
package config
|
||||
|
||||
const defaultCap int64 = 2e9 // 2GB
|
||||
|
||||
func DefaultGCPolicy(p string, keep int64) []GCPolicy {
|
||||
if keep == 0 {
|
||||
keep = DetectDefaultGCCap(p)
|
||||
}
|
||||
return []GCPolicy{
|
||||
// if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
|
||||
{
|
||||
Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
|
||||
KeepDuration: 48 * 3600, // 48h
|
||||
KeepBytes: 512 * 1e6, // 512MB
|
||||
},
|
||||
// remove any data not used for 60 days
|
||||
{
|
||||
KeepDuration: 60 * 24 * 3600, // 60d
|
||||
KeepBytes: keep,
|
||||
},
|
||||
// keep the unshared build cache under cap
|
||||
{
|
||||
KeepBytes: keep,
|
||||
},
|
||||
// if previous policies were insufficient start deleting internal data to keep build cache under cap
|
||||
{
|
||||
All: true,
|
||||
KeepBytes: keep,
|
||||
},
|
||||
}
|
||||
}
|
17
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go
generated
vendored
17
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func DetectDefaultGCCap(root string) int64 {
|
||||
var st syscall.Statfs_t
|
||||
if err := syscall.Statfs(root, &st); err != nil {
|
||||
return defaultCap
|
||||
}
|
||||
diskSize := int64(st.Bsize) * int64(st.Blocks)
|
||||
avail := diskSize / 10
|
||||
return (avail/(1<<30) + 1) * 1e9 // round up
|
||||
}
|
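For reference, the heuristic in the removed DetectDefaultGCCap caps the build cache at roughly 10% of the filesystem, rounded up to the next 1e9 bytes. A worked example of the arithmetic:

package main

import "fmt"

func main() {
	// For a 500 GiB filesystem:
	diskSize := int64(500) * (1 << 30) // 536870912000 bytes
	avail := diskSize / 10             // 53687091200 bytes (10%)
	capBytes := (avail/(1<<30) + 1) * 1e9
	fmt.Println(capBytes) // 51000000000 (~51 GB: 50 whole GiB units, plus one, times 1e9)
}
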
7  vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go  (generated, vendored)

@@ -1,7 +0,0 @@
-// +build windows
-
-package config
-
-func DetectDefaultGCCap(root string) int64 {
-	return defaultCap
-}

2  vendor/github.com/moby/buildkit/executor/oci/resolvconf.go  (generated, vendored)

@@ -6,9 +6,9 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/libnetwork/resolvconf"
 	"github.com/docker/docker/libnetwork/types"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/pkg/errors"
 )

4  vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go  (generated, vendored)

@@ -19,8 +19,8 @@ import (
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
-	"github.com/moby/buildkit/frontend/gateway/errdefs"
 	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/network"
 	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"

@@ -337,7 +337,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 func exitError(ctx context.Context, err error) error {
 	if err != nil {
 		exitErr := &errdefs.ExitError{
-			ExitCode: errdefs.ContainerdUnknownExitStatus,
+			ExitCode: errdefs.UnknownExitStatus,
 			Err:      err,
 		}
 		var runcExitError *runc.ExitError

4  vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go  (generated, vendored)

@@ -54,6 +54,7 @@ const (
 	keyContextSubDir     = "contextsubdir"
 	keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR"
 	keySyntax            = "build-arg:BUILDKIT_SYNTAX"
+	keyMultiPlatformArg  = "build-arg:BUILDKIT_MULTI_PLATFORM"
 	keyHostname          = "hostname"
 )

@@ -369,6 +370,9 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 
 	exportMap := len(targetPlatforms) > 1
 
+	if v := opts[keyMultiPlatformArg]; v != "" {
+		opts[keyMultiPlatform] = v
+	}
 	if v := opts[keyMultiPlatform]; v != "" {
 		b, err := strconv.ParseBool(v)
 		if err != nil {

98  vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go  (generated, vendored)

@@ -507,7 +507,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
 	case *instructions.AddCommand:
 		err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, c.Chmod, c.Location(), opt)
 		if err == nil {
-			for _, src := range c.Sources() {
+			for _, src := range c.SourcePaths {
 				if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") {
 					d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
 				}

@@ -542,7 +542,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
 		}
 		err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, c.Chmod, c.Location(), opt)
 		if err == nil && len(cmd.sources) == 0 {
-			for _, src := range c.Sources() {
+			for _, src := range c.SourcePaths {
 				d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
 			}
 		}

@@ -647,15 +647,63 @@ func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error {
 }
 
 func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
+	var opt []llb.RunOption
+
 	var args []string = c.CmdLine
+	if len(c.Files) > 0 {
+		if len(args) != 1 {
+			return fmt.Errorf("parsing produced an invalid run command: %v", args)
+		}
+
+		if heredoc := parser.MustParseHeredoc(args[0]); heredoc != nil {
+			if d.image.OS != "windows" && strings.HasPrefix(c.Files[0].Data, "#!") {
+				// This is a single heredoc with a shebang, so create a file
+				// and run it.
+				// NOTE: choosing to expand doesn't really make sense here, so
+				// we silently ignore that option if it was provided.
+				sourcePath := "/"
+				destPath := "/dev/pipes/"
+
+				f := c.Files[0].Name
+				data := c.Files[0].Data
+				if c.Files[0].Chomp {
+					data = parser.ChompHeredocContent(data)
+				}
+				st := llb.Scratch().Dir(sourcePath).File(llb.Mkfile(f, 0755, []byte(data)))
+
+				mount := llb.AddMount(destPath, st, llb.SourcePath(sourcePath), llb.Readonly)
+				opt = append(opt, mount)
+
+				args[0] = path.Join(destPath, f)
+			} else {
+				// Just a simple heredoc, so just run the contents in the
+				// shell: this creates the effect of a "fake"-heredoc, so that
+				// the syntax can still be used for shells that don't support
+				// heredocs directly.
+				// NOTE: like above, we ignore the expand option.
+				data := c.Files[0].Data
+				if c.Files[0].Chomp {
+					data = parser.ChompHeredocContent(data)
+				}
+				args[0] = data
+			}
+		} else {
+			// More complex heredoc, so reconstitute it, and pass it to the
+			// shell to handle.
+			for _, file := range c.Files {
+				args[0] += "\n" + file.Data + file.Name
+			}
+		}
+	}
 	if c.PrependShell {
 		args = withShell(d.image, args)
 	}
 
 	env, err := d.state.Env(context.TODO())
 	if err != nil {
 		return err
 	}
-	opt := []llb.RunOption{llb.Args(args), dfCmd(c), location(dopt.sourceMap, c.Location())}
+	opt = append(opt, llb.Args(args), dfCmd(c), location(dopt.sourceMap, c.Location()))
 	if d.ignoreCache {
 		opt = append(opt, llb.IgnoreCache)
 	}

@@ -735,12 +783,12 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
 }
 
 func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, chmod string, loc []parser.Range, opt dispatchOpt) error {
-	pp, err := pathRelativeToWorkingDir(d.state, c.Dest())
+	pp, err := pathRelativeToWorkingDir(d.state, c.DestPath)
 	if err != nil {
 		return err
 	}
 	dest := path.Join("/", pp)
-	if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator {
+	if c.DestPath == "." || c.DestPath == "" || c.DestPath[len(c.DestPath)-1] == filepath.Separator {
 		dest += string(filepath.Separator)
 	}

@@ -768,7 +816,7 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
 
 	var a *llb.FileAction
 
-	for _, src := range c.Sources() {
+	for _, src := range c.SourcePaths {
 		commitMessage.WriteString(" " + src)
 		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
 			if !isAddCommand {

@@ -791,6 +839,7 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
 			st := llb.HTTP(src, llb.Filename(f), dfCmd(c))
+
 			opts := append([]llb.CopyOption{&llb.CopyInfo{
 				Mode:           mode,
 				CreateDestPath: true,
 			}}, copyOpt...)

@@ -818,7 +867,24 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
 		}
 	}
 
+	for _, src := range c.SourceContents {
+		data := src.Data
+		f := src.Path
+		st := llb.Scratch().Dir("/").File(llb.Mkfile(f, 0664, []byte(data)))
+
+		opts := append([]llb.CopyOption{&llb.CopyInfo{
+			Mode:           mode,
+			CreateDestPath: true,
+		}}, copyOpt...)
+
+		if a == nil {
+			a = llb.Copy(st, f, dest, opts...)
+		} else {
+			a = a.Copy(st, f, dest, opts...)
+		}
+	}
+
-	commitMessage.WriteString(" " + c.Dest())
+	commitMessage.WriteString(" " + c.DestPath)
 
 	platform := opt.targetPlatform
 	if d.platform != nil {

@@ -847,6 +913,10 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
 		return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, chmod, loc, opt)
 	}
 
+	if len(c.SourceContents) > 0 {
+		return errors.New("inline content copy is not supported")
+	}
+
 	if chmod != "" {
 		if opt.llbCaps != nil && opt.llbCaps.Supports(pb.CapFileBase) != nil {
 			return errors.Wrap(opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported")

@@ -855,18 +925,18 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
 	}
 
 	img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
-	pp, err := pathRelativeToWorkingDir(d.state, c.Dest())
+	pp, err := pathRelativeToWorkingDir(d.state, c.DestPath)
 	if err != nil {
 		return err
 	}
 	dest := path.Join(".", pp)
-	if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator {
+	if c.DestPath == "." || c.DestPath == "" || c.DestPath[len(c.DestPath)-1] == filepath.Separator {
 		dest += string(filepath.Separator)
 	}
 	args := []string{"copy"}
 	unpack := isAddCommand
 
-	mounts := make([]llb.RunOption, 0, len(c.Sources()))
+	mounts := make([]llb.RunOption, 0, len(c.SourcePaths))
 	if chown != "" {
 		args = append(args, fmt.Sprintf("--chown=%s", chown))
 		_, _, err := parseUser(chown)

@@ -883,7 +953,7 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
 		commitMessage.WriteString("COPY")
 	}
 
-	for i, src := range c.Sources() {
+	for i, src := range c.SourcePaths {
 		commitMessage.WriteString(" " + src)
 		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
 			if !isAddCommand {

@@ -920,7 +990,7 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l
 		}
 	}
 
-	commitMessage.WriteString(" " + c.Dest())
+	commitMessage.WriteString(" " + c.DestPath)
 
 	args = append(args, dest)
 	if unpack {

@@ -1338,6 +1408,10 @@ func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {
 			pe.NoProxy = v
 			isNil = false
 		}
+		if strings.EqualFold(k, "all_proxy") {
+			pe.AllProxy = v
+			isNil = false
+		}
 	}
 	if isNil {
 		return nil

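The two RUN branches above map onto Dockerfile heredoc syntax (gated behind the dfheredoc build tag; see parser_heredoc.go below). An illustrative Dockerfile held in a Go constant; it is not taken from the patch:

package main

import "fmt"

const dockerfile = `FROM alpine
# shebang heredoc: the body becomes a file mounted at /dev/pipes/<name>
# and args[0] is rewritten to execute it
RUN <<EOF
#!/bin/sh
echo hello from a mounted script
EOF
# plain heredoc: the body is handed to the default shell as the command
RUN <<EOT
echo hello from the shell
EOT
`

func main() { fmt.Print(dockerfile) }
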
14  vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go  (generated, vendored)

@@ -20,12 +20,14 @@ func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool {
 		mounts := instructions.GetMounts(c)
 		sources := make([]*dispatchState, len(mounts))
 		for i, mount := range mounts {
-			if mount.From == "" && mount.Type == instructions.MountTypeCache {
-				mount.From = emptyImageName
-			}
-			from := mount.From
-			if from == "" || mount.Type == instructions.MountTypeTmpfs {
-				continue
+			var from string
+			if mount.From == "" {
+				// this might not be accurate because the type might not have a real source (tmpfs for instance),
+				// but since this is just for creating the sources map it should be ok (we don't want to check the value of
+				// mount.Type because it might be a variable)
+				from = emptyImageName
+			} else {
+				from = mount.From
 			}
 			stn, ok := allDispatchStates.findStateByName(from)
 			if !ok {

69  vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go  (generated, vendored)

@@ -165,19 +165,45 @@ func (c *LabelCommand) Expand(expander SingleWordExpander) error {
 	return expandKvpsInPlace(c.Labels, expander)
 }
 
-// SourcesAndDest represent a list of source files and a destination
-type SourcesAndDest []string
-
-// Sources list the source paths
-func (s SourcesAndDest) Sources() []string {
-	res := make([]string, len(s)-1)
-	copy(res, s[:len(s)-1])
-	return res
-}
-
-// Dest path of the operation
-func (s SourcesAndDest) Dest() string {
-	return s[len(s)-1]
-}
+// SourceContent represents an anonymous file object
+type SourceContent struct {
+	Path   string
+	Data   string
+	Expand bool
+}
+
+// SourcesAndDest represent a collection of sources and a destination
+type SourcesAndDest struct {
+	DestPath       string
+	SourcePaths    []string
+	SourceContents []SourceContent
+}
+
+func (s *SourcesAndDest) Expand(expander SingleWordExpander) error {
+	for i, content := range s.SourceContents {
+		if !content.Expand {
+			continue
+		}
+
+		expandedData, err := expander(content.Data)
+		if err != nil {
+			return err
+		}
+		s.SourceContents[i].Data = expandedData
+	}
+
+	err := expandSliceInPlace(s.SourcePaths, expander)
+	if err != nil {
+		return err
+	}
+
+	expandedDestPath, err := expander(s.DestPath)
+	if err != nil {
+		return err
+	}
+	s.DestPath = expandedDestPath
+
+	return nil
+}
 
 // AddCommand : ADD foo /path

@@ -199,7 +225,8 @@ func (c *AddCommand) Expand(expander SingleWordExpander) error {
 		return err
 	}
 	c.Chown = expandedChown
-	return expandSliceInPlace(c.SourcesAndDest, expander)
+
+	return c.SourcesAndDest.Expand(expander)
 }
 
 // CopyCommand : COPY foo /path

@@ -221,7 +248,8 @@ func (c *CopyCommand) Expand(expander SingleWordExpander) error {
 		return err
 	}
 	c.Chown = expandedChown
-	return expandSliceInPlace(c.SourcesAndDest, expander)
+
+	return c.SourcesAndDest.Expand(expander)
 }
 
 // OnbuildCommand : ONBUILD <some other command>

@@ -249,9 +277,17 @@ func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
 	return nil
 }
 
+// ShellInlineFile represents an inline file created for a shell command
+type ShellInlineFile struct {
+	Name  string
+	Data  string
+	Chomp bool
+}
+
 // ShellDependantCmdLine represents a cmdline optionally prepended with the shell
 type ShellDependantCmdLine struct {
 	CmdLine      strslice.StrSlice
+	Files        []ShellInlineFile
 	PrependShell bool
 }

@@ -272,6 +308,13 @@ type RunCommand struct {
 	FlagsUsed []string
 }
 
+func (c *RunCommand) Expand(expander SingleWordExpander) error {
+	if err := setMountState(c, expander); err != nil {
+		return err
+	}
+	return nil
+}
+
 // CmdCommand : CMD foo
 //
 // Set the default command to run in the container (which may be empty).

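SourcesAndDest changes here from a positional []string (destination last) to a struct with named fields plus inline heredoc sources. A toy sketch of the migration for downstream callers; the types below are stand-ins, not the instructions package's:

package main

import "fmt"

type SourceContent struct{ Path, Data string }
type SourcesAndDest struct {
	DestPath       string
	SourcePaths    []string
	SourceContents []SourceContent
}

func main() {
	// Old callers did: dest := args[len(args)-1]; srcs := args[:len(args)-1].
	// New callers read named fields, and heredocs arrive as SourceContents:
	c := SourcesAndDest{
		DestPath:       "/app/",
		SourcePaths:    []string{"main.go"},
		SourceContents: []SourceContent{{Path: "config", Data: "key=value\n"}},
	}
	fmt.Println(c.DestPath, c.SourcePaths, c.SourceContents[0].Path)
}
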
26  vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go  (generated, vendored)

@@ -2,6 +2,7 @@ package instructions
 
 import (
 	"encoding/csv"
+	"regexp"
 	"strconv"
 	"strings"

@@ -64,13 +65,17 @@ func runMountPreHook(cmd *RunCommand, req parseRequest) error {
 }
 
 func runMountPostHook(cmd *RunCommand, req parseRequest) error {
+	return setMountState(cmd, nil)
+}
+
+func setMountState(cmd *RunCommand, expander SingleWordExpander) error {
 	st := getMountState(cmd)
 	if st == nil {
 		return errors.Errorf("no mount state")
 	}
 	var mounts []*Mount
 	for _, str := range st.flag.StringValues {
-		m, err := parseMount(str)
+		m, err := parseMount(str, expander)
 		if err != nil {
 			return err
 		}

@@ -111,7 +116,7 @@ type Mount struct {
 	GID *uint64
 }
 
-func parseMount(value string) (*Mount, error) {
+func parseMount(value string, expander SingleWordExpander) (*Mount, error) {
 	csvReader := csv.NewReader(strings.NewReader(value))
 	fields, err := csvReader.Read()
 	if err != nil {

@@ -151,6 +156,23 @@ func parseMount(value string) (*Mount, error) {
 		}
 
 		value := parts[1]
+		// check for potential variable
+		if expander != nil {
+			processed, err := expander(value)
+			if err != nil {
+				return nil, err
+			}
+			value = processed
+		} else if key == "from" {
+			if matched, err := regexp.MatchString(`\$.`, value); err != nil { //nolint
+				return nil, err
+			} else if matched {
+				return nil, errors.Errorf("'%s' doesn't support variable expansion, define alias stage instead", key)
+			}
+		} else {
+			// if we don't have an expander, defer evaluation to later
+			continue
+		}
 		switch key {
 		case "type":
 			if !isValidMountType(strings.ToLower(value)) {

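parseMount now runs twice: once at parse time with a nil expander, where values that may hold variables are deferred (and the from key rejects them outright), and once from RunCommand.Expand with a real shell expander. A stand-in expander to illustrate the second phase; the substitution map is invented for the example:

package main

import (
	"fmt"
	"strings"
)

// Stand-in for SingleWordExpander; the real one wraps the shell lexer and the
// stage's build args.
func expander(word string) (string, error) {
	return strings.ReplaceAll(word, "$CACHE_DIR", "/root/.cache/go-build"), nil
}

func main() {
	// Phase 1 (parse time): expander is nil, so "target=$CACHE_DIR" is kept
	// verbatim. Phase 2 (RunCommand.Expand): the value is resolved:
	v, _ := expander("target=$CACHE_DIR")
	fmt.Println(v) // target=/root/.cache/go-build
}
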
106  vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go  (generated, vendored)

@@ -18,6 +18,7 @@ import (
 type parseRequest struct {
 	command    string
 	args       []string
+	heredocs   []parser.Heredoc
 	attributes map[string]bool
 	flags      *BFlags
 	original   string

@@ -47,6 +48,7 @@ func newParseRequestFromNode(node *parser.Node) parseRequest {
 	return parseRequest{
 		command:    node.Value,
 		args:       nodeArgs(node),
+		heredocs:   node.Heredocs,
 		attributes: node.Attributes,
 		original:   node.Original,
 		flags:      NewBFlagsWithArgs(node.Flags),

@@ -236,6 +238,45 @@ func parseLabel(req parseRequest) (*LabelCommand, error) {
 	}, nil
 }
 
+func parseSourcesAndDest(req parseRequest, command string) (*SourcesAndDest, error) {
+	srcs := req.args[:len(req.args)-1]
+	dest := req.args[len(req.args)-1]
+	if heredoc := parser.MustParseHeredoc(dest); heredoc != nil {
+		return nil, errBadHeredoc(command, "a destination")
+	}
+
+	heredocLookup := make(map[string]parser.Heredoc)
+	for _, heredoc := range req.heredocs {
+		heredocLookup[heredoc.Name] = heredoc
+	}
+
+	var sourcePaths []string
+	var sourceContents []SourceContent
+	for _, src := range srcs {
+		if heredoc := parser.MustParseHeredoc(src); heredoc != nil {
+			content := heredocLookup[heredoc.Name].Content
+			if heredoc.Chomp {
+				content = parser.ChompHeredocContent(content)
+			}
+			sourceContents = append(sourceContents,
+				SourceContent{
+					Data:   content,
+					Path:   heredoc.Name,
+					Expand: heredoc.Expand,
+				},
+			)
+		} else {
+			sourcePaths = append(sourcePaths, src)
+		}
+	}
+
+	return &SourcesAndDest{
+		DestPath:       dest,
+		SourcePaths:    sourcePaths,
+		SourceContents: sourceContents,
+	}, nil
+}
+
 func parseAdd(req parseRequest) (*AddCommand, error) {
 	if len(req.args) < 2 {
 		return nil, errNoDestinationArgument("ADD")

@@ -245,9 +286,15 @@ func parseAdd(req parseRequest) (*AddCommand, error) {
 	if err := req.flags.Parse(); err != nil {
 		return nil, err
 	}
+
+	sourcesAndDest, err := parseSourcesAndDest(req, "ADD")
+	if err != nil {
+		return nil, err
+	}
+
 	return &AddCommand{
-		SourcesAndDest:  SourcesAndDest(req.args),
 		withNameAndCode: newWithNameAndCode(req),
+		SourcesAndDest:  *sourcesAndDest,
 		Chown:           flChown.Value,
 		Chmod:           flChmod.Value,
 	}, nil

@@ -263,10 +310,16 @@ func parseCopy(req parseRequest) (*CopyCommand, error) {
 	if err := req.flags.Parse(); err != nil {
 		return nil, err
 	}
+
+	sourcesAndDest, err := parseSourcesAndDest(req, "COPY")
+	if err != nil {
+		return nil, err
+	}
+
 	return &CopyCommand{
-		SourcesAndDest:  SourcesAndDest(req.args),
-		From:            flFrom.Value,
 		withNameAndCode: newWithNameAndCode(req),
+		SourcesAndDest:  *sourcesAndDest,
+		From:            flFrom.Value,
 		Chown:           flChown.Value,
 		Chmod:           flChmod.Value,
 	}, nil

@@ -351,7 +404,17 @@ func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
 
 }
 
-func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine {
+func parseShellDependentCommand(req parseRequest, command string, emptyAsNil bool) (ShellDependantCmdLine, error) {
+	var files []ShellInlineFile
+	for _, heredoc := range req.heredocs {
+		file := ShellInlineFile{
+			Name:  heredoc.Name,
+			Data:  heredoc.Content,
+			Chomp: heredoc.Chomp,
+		}
+		files = append(files, file)
+	}
+
 	args := handleJSONArgs(req.args, req.attributes)
 	cmd := strslice.StrSlice(args)
 	if emptyAsNil && len(cmd) == 0 {

@@ -359,8 +422,9 @@ func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependan
 	}
 	return ShellDependantCmdLine{
 		CmdLine:      cmd,
+		Files:        files,
 		PrependShell: !req.attributes["json"],
-	}
+	}, nil
 }
 
 func parseRun(req parseRequest) (*RunCommand, error) {

@@ -376,7 +440,13 @@ func parseRun(req parseRequest) (*RunCommand, error) {
 		return nil, err
 	}
 	cmd.FlagsUsed = req.flags.Used()
-	cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false)
+
+	cmdline, err := parseShellDependentCommand(req, "RUN", false)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ShellDependantCmdLine = cmdline
+
 	cmd.withNameAndCode = newWithNameAndCode(req)
 
 	for _, fn := range parseRunPostHooks {

@@ -392,11 +462,16 @@ func parseCmd(req parseRequest) (*CmdCommand, error) {
 	if err := req.flags.Parse(); err != nil {
 		return nil, err
 	}
+
+	cmdline, err := parseShellDependentCommand(req, "CMD", false)
+	if err != nil {
+		return nil, err
+	}
+
 	return &CmdCommand{
-		ShellDependantCmdLine: parseShellDependentCommand(req, false),
+		ShellDependantCmdLine: cmdline,
 		withNameAndCode:       newWithNameAndCode(req),
 	}, nil
-
 }
 
 func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {

@@ -404,12 +479,15 @@ func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {
 		return nil, err
 	}
 
-	cmd := &EntrypointCommand{
-		ShellDependantCmdLine: parseShellDependentCommand(req, true),
-		withNameAndCode:       newWithNameAndCode(req),
+	cmdline, err := parseShellDependentCommand(req, "ENTRYPOINT", true)
+	if err != nil {
+		return nil, err
 	}
 
-	return cmd, nil
+	return &EntrypointCommand{
+		ShellDependantCmdLine: cmdline,
+		withNameAndCode:       newWithNameAndCode(req),
+	}, nil
 }
 
 // parseOptInterval(flag) is the duration of flag.Value, or 0 if

@@ -651,6 +729,10 @@ func errNoDestinationArgument(command string) error {
 	return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command)
 }
 
+func errBadHeredoc(command string, option string) error {
+	return errors.Errorf("%s cannot accept a heredoc as %s", command, option)
+}
+
 func errBlankCommandNames(command string) error {
 	return errors.Errorf("%s names can not be blank", command)
 }

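parseSourcesAndDest splits COPY/ADD arguments into SourcePaths for plain paths and SourceContents for heredocs, keeping DestPath separate. An illustrative Dockerfile held in a Go constant; with it, SourcePaths is ["main.go"], SourceContents carries one file named config, and DestPath is "/app/":

package main

import "fmt"

// Illustrative only; not from the patch.
const dockerfile = `FROM alpine
COPY main.go <<config /app/
key=value
config
`

func main() { fmt.Print(dockerfile) }
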
153  vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go  (generated, vendored)

@@ -12,6 +12,7 @@ import (
 	"unicode"
 
 	"github.com/moby/buildkit/frontend/dockerfile/command"
+	"github.com/moby/buildkit/frontend/dockerfile/shell"
 	"github.com/pkg/errors"
 )

@@ -31,6 +32,7 @@ type Node struct {
 	Value      string          // actual content
 	Next       *Node           // the next item in the current sexp
 	Children   []*Node         // the children of this sexp
+	Heredocs   []Heredoc       // extra heredoc content attachments
 	Attributes map[string]bool // special attributes for this node
 	Original   string          // original line used before parsing
 	Flags      []string        // only top Node should have this set

@@ -74,6 +76,17 @@ func (node *Node) lines(start, end int) {
 	node.EndLine = end
 }
 
+func (node *Node) canContainHeredoc() bool {
+	if _, allowedDirective := heredocDirectives[node.Value]; !allowedDirective {
+		return false
+	}
+	if _, isJSON := node.Attributes["json"]; isJSON {
+		return false
+	}
+
+	return true
+}
+
 // AddChild adds a new child node, and updates line information
 func (node *Node) AddChild(child *Node, startLine, endLine int) {
 	child.lines(startLine, endLine)

@@ -84,11 +97,22 @@ func (node *Node) AddChild(child *Node, startLine, endLine int) {
 	node.Children = append(node.Children, child)
 }
 
+type Heredoc struct {
+	Name           string
+	FileDescriptor uint
+	Expand         bool
+	Chomp          bool
+	Content        string
+}
+
 var (
-	dispatch     map[string]func(string, *directives) (*Node, map[string]bool, error)
-	reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
-	reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
-	reComment    = regexp.MustCompile(`^#.*$`)
+	dispatch          map[string]func(string, *directives) (*Node, map[string]bool, error)
+	heredocDirectives map[string]bool
+	reWhitespace      = regexp.MustCompile(`[\t\v\f\r ]+`)
+	reDirectives      = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
+	reComment         = regexp.MustCompile(`^#.*$`)
+	reHeredoc         = regexp.MustCompile(`^(\d*)<<(-?)(['"]?)([a-zA-Z][a-zA-Z0-9]*)(['"]?)$`)
+	reLeadingTabs     = regexp.MustCompile(`(?m)^\t+`)
 )
 
 // DefaultEscapeToken is the default escape token

@@ -252,6 +276,7 @@ func Parse(rwc io.Reader) (*Result, error) {
 	currentLine := 0
 	root := &Node{StartLine: -1}
 	scanner := bufio.NewScanner(rwc)
+	scanner.Split(scanLines)
 	warnings := []string{}
 	var comments []string

@@ -312,8 +337,40 @@ func Parse(rwc io.Reader) (*Result, error) {
 		if err != nil {
 			return nil, withLocation(err, startLine, currentLine)
 		}
-		comments = nil
+
+		if child.canContainHeredoc() {
+			heredocs, err := heredocsFromLine(line)
+			if err != nil {
+				return nil, withLocation(err, startLine, currentLine)
+			}
+
+			for _, heredoc := range heredocs {
+				terminator := []byte(heredoc.Name)
+				terminated := false
+				for scanner.Scan() {
+					bytesRead := scanner.Bytes()
+					currentLine++
+
+					possibleTerminator := trimNewline(bytesRead)
+					if heredoc.Chomp {
+						possibleTerminator = trimLeadingTabs(possibleTerminator)
+					}
+					if bytes.Equal(possibleTerminator, terminator) {
+						terminated = true
+						break
+					}
+					heredoc.Content += string(bytesRead)
+				}
+				if !terminated {
+					return nil, withLocation(errors.New("unterminated heredoc"), startLine, currentLine)
+				}
+
+				child.Heredocs = append(child.Heredocs, heredoc)
+			}
+		}
+
 		root.AddChild(child, startLine, currentLine)
+		comments = nil
 	}
 
 	if len(warnings) > 0 {

@@ -331,20 +388,83 @@ func Parse(rwc io.Reader) (*Result, error) {
 	}, withLocation(handleScannerError(scanner.Err()), currentLine, 0)
 }
 
+func heredocFromMatch(match []string) (*Heredoc, error) {
+	if len(match) == 0 {
+		return nil, nil
+	}
+
+	fileDescriptor, _ := strconv.ParseUint(match[1], 10, 0)
+	chomp := match[2] == "-"
+	quoteOpen := match[3]
+	name := match[4]
+	quoteClose := match[5]
+
+	expand := true
+	if quoteOpen != "" || quoteClose != "" {
+		if quoteOpen != quoteClose {
+			return nil, errors.New("quoted heredoc quotes do not match")
+		}
+		expand = false
+	}
+
+	return &Heredoc{
+		Name:           name,
+		Expand:         expand,
+		Chomp:          chomp,
+		FileDescriptor: uint(fileDescriptor),
+	}, nil
+}
+
+func ParseHeredoc(src string) (*Heredoc, error) {
+	return heredocFromMatch(reHeredoc.FindStringSubmatch(src))
+}
+func MustParseHeredoc(src string) *Heredoc {
+	heredoc, _ := ParseHeredoc(src)
+	return heredoc
+}
+
+func heredocsFromLine(line string) ([]Heredoc, error) {
+	shlex := shell.NewLex('\\')
+	shlex.RawQuotes = true
+	words, _ := shlex.ProcessWords(line, []string{})
+
+	var docs []Heredoc
+	for _, word := range words {
+		heredoc, err := ParseHeredoc(word)
+		if err != nil {
+			return nil, err
+		}
+		if heredoc != nil {
+			docs = append(docs, *heredoc)
+		}
+	}
+	return docs, nil
+}
+
+func ChompHeredocContent(src string) string {
+	return reLeadingTabs.ReplaceAllString(src, "")
+}
+
 func trimComments(src []byte) []byte {
 	return reComment.ReplaceAll(src, []byte{})
 }
 
-func trimWhitespace(src []byte) []byte {
+func trimLeadingWhitespace(src []byte) []byte {
 	return bytes.TrimLeftFunc(src, unicode.IsSpace)
 }
+func trimLeadingTabs(src []byte) []byte {
+	return bytes.TrimLeft(src, "\t")
+}
+func trimNewline(src []byte) []byte {
+	return bytes.TrimRight(src, "\r\n")
+}
 
 func isComment(line []byte) bool {
-	return reComment.Match(trimWhitespace(line))
+	return reComment.Match(trimLeadingWhitespace(trimNewline(line)))
 }
 
 func isEmptyContinuationLine(line []byte) bool {
-	return len(trimWhitespace(line)) == 0
+	return len(trimLeadingWhitespace(trimNewline(line))) == 0
 }
 
 var utf8bom = []byte{0xEF, 0xBB, 0xBF}

@@ -360,12 +480,27 @@ func trimContinuationCharacter(line string, d *directives) (string, bool) {
 // TODO: remove stripLeftWhitespace after deprecation period. It seems silly
 // to preserve whitespace on continuation lines. Why is that done?
 func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, error) {
+	token = trimNewline(token)
 	if stripLeftWhitespace {
-		token = trimWhitespace(token)
+		token = trimLeadingWhitespace(token)
 	}
 	return trimComments(token), d.possibleParserDirective(string(token))
 }
 
+// Variation of bufio.ScanLines that preserves the line endings
+func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	if i := bytes.IndexByte(data, '\n'); i >= 0 {
+		return i + 1, data[0 : i+1], nil
+	}
+	if atEOF {
+		return len(data), data, nil
+	}
+	return 0, nil, nil
+}
+
 func handleScannerError(err error) error {
 	switch err {
 	case bufio.ErrTooLong:

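ParseHeredoc is driven entirely by reHeredoc, so its behavior on a quoted, chomping marker follows directly from the regex shown above:

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	// reHeredoc: ^(\d*)<<(-?)(['"]?)([a-zA-Z][a-zA-Z0-9]*)(['"]?)$
	h, _ := parser.ParseHeredoc("<<-'EOF'")
	fmt.Printf("%+v\n", *h)
	// {Name:EOF FileDescriptor:0 Expand:false Chomp:true Content:}
	// quoting the name disables expansion; "-" enables tab-chomping
}
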
13  vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_heredoc.go  (generated, vendored, new file)

@@ -0,0 +1,13 @@
+// +build dfheredoc
+
+package parser
+
+import "github.com/moby/buildkit/frontend/dockerfile/command"
+
+func init() {
+	heredocDirectives = map[string]bool{
+		command.Add:  true,
+		command.Copy: true,
+		command.Run:  true,
+	}
+}

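Note that heredocDirectives is only populated by this init, and the file is compiled only under the dfheredoc build tag (for example, go build -tags dfheredoc). Without the tag the map stays nil and canContainHeredoc rejects every node:

package main

import "fmt"

func main() {
	var heredocDirectives map[string]bool // nil unless the dfheredoc init runs
	_, allowed := heredocDirectives["run"]
	fmt.Println(allowed) // false: "<<EOF" stays a plain literal argument
}
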
@@ -3,13 +3,13 @@ package errdefs
 import "fmt"
 
 const (
-	// ContainerdUnknownExitStatus is returned when containerd is unable to
-	// determine the exit status of a process. This can happen if the process never starts
+	// UnknownExitStatus might be returned in (*ExitError).ExitCode via
+	// ContainerProcess.Wait. This can happen if the process never starts
 	// or if an error was encountered when obtaining the exit status, it is set to 255.
 	//
 	// This const is defined here to prevent importing github.com/containerd/containerd
 	// and corresponds with https://github.com/containerd/containerd/blob/40b22ef0741028917761d8c5d5d29e0d19038836/task.go#L52-L55
-	ContainerdUnknownExitStatus = 255
+	UnknownExitStatus = 255
 )
 
 // ExitError will be returned when the container process exits with a non-zero

5  vendor/github.com/moby/buildkit/frontend/gateway/gateway.go  (generated, vendored)

@@ -25,6 +25,7 @@ import (
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend"
 	gwclient "github.com/moby/buildkit/frontend/gateway/client"
+	gwerrdefs "github.com/moby/buildkit/frontend/gateway/errdefs"
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"

@@ -1166,10 +1167,10 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e
 			err := proc.Wait()
 
 			var statusCode uint32
-			var exitError *errdefs.ExitError
+			var exitError *gwerrdefs.ExitError
 			var statusError *rpc.Status
 			if err != nil {
-				statusCode = errdefs.ContainerdUnknownExitStatus
+				statusCode = gwerrdefs.UnknownExitStatus
 				st, _ := status.FromError(grpcerrors.ToGRPC(err))
 				stp := st.Proto()
 				statusError = &rpc.Status{

4  vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go  (generated, vendored)

@@ -16,9 +16,9 @@ import (
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/frontend/gateway/errdefs"
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
-	"github.com/moby/buildkit/solver/errdefs"
 	opspb "github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/buildkit/util/grpcerrors"

@@ -882,7 +882,7 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien
 					Message: exit.Error.Message,
 					Details: convertGogoAny(exit.Error.Details),
 				}))
-				if exit.Code != errdefs.ContainerdUnknownExitStatus {
+				if exit.Code != errdefs.UnknownExitStatus {
 					exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError}
 				}
 			} else if serverDone := msg.GetDone(); serverDone != nil {

179  vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go  (generated, vendored)

@@ -5577,7 +5577,7 @@ func (m *Result) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -5594,10 +5591,7 @@ func (m *Result) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -5758,7 +5755,7 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -5775,10 +5772,7 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -5897,10 +5891,7 @@ func (m *Ref) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6063,7 +6054,7 @@ func (m *RefMap) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -6080,10 +6071,7 @@ func (m *RefMap) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6206,10 +6194,7 @@ func (m *ReturnRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6260,10 +6245,7 @@ func (m *ReturnResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6314,10 +6296,7 @@ func (m *InputsRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6480,7 +6459,7 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -6497,10 +6476,7 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6683,10 +6659,7 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -6803,10 +6776,7 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -7035,7 +7005,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -7324,7 +7294,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -7361,10 +7331,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -7557,7 +7524,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > postIndex {

@@ -7574,10 +7541,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -7696,10 +7660,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -7850,10 +7811,7 @@ func (m *ReadFileRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -7942,10 +7900,7 @@ func (m *FileRange) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8030,10 +7985,7 @@ func (m *ReadFileResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8180,10 +8132,7 @@ func (m *ReadDirRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8268,10 +8217,7 @@ func (m *ReadDirResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8386,10 +8332,7 @@ func (m *StatFileRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8476,10 +8419,7 @@ func (m *StatFileResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8530,10 +8470,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8686,10 +8623,7 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8897,10 +8831,7 @@ func (m *NewContainerRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthGateway
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthGateway
 			}
 			if (iNdEx + skippy) > l {

@@ -8951,10 +8882,7 @@ func (m *NewContainerResponse) Unmarshal(dAtA []byte) error {
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9037,10 +8965,7 @@ func (m *ReleaseContainerRequest) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9091,10 +9016,7 @@ func (m *ReleaseContainerResponse) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9387,10 +9309,7 @@ func (m *ExecMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9624,10 +9543,7 @@ func (m *InitMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9733,10 +9649,7 @@ func (m *ExitMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9787,10 +9700,7 @@ func (m *StartedMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9841,10 +9751,7 @@ func (m *DoneMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -9968,10 +9875,7 @@ func (m *FdMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -10060,10 +9964,7 @@ func (m *ResizeMessage) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGateway
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
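Every hunk above applies the same hardening to the generated skip logic: checking `skippy < 0` alone misses the case where a huge skip length makes `iNdEx+skippy` wrap around to a negative int. A minimal standalone sketch of the pattern follows; `skipToEnd` and `errInvalidLength` are illustrative names, not buildkit code.

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// skipToEnd mirrors the hardened bounds check from the hunks above. The
// combined condition also catches the case where a near-MaxInt skippy makes
// the sum iNdEx+skippy wrap to a negative value (Go signed ints wrap).
func skipToEnd(iNdEx, skippy, l int) (int, error) {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return 0, errInvalidLength
	}
	if iNdEx+skippy > l {
		return 0, errors.New("unexpected EOF")
	}
	return iNdEx + skippy, nil
}

func main() {
	// skippy == MaxInt: the old `skippy < 0` check alone would pass,
	// but 10+MaxInt overflows to a negative index.
	_, err := skipToEnd(10, int(^uint(0)>>1), 100)
	fmt.Println(err) // proto: negative length found during unmarshaling
}
```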
vendor/github.com/moby/buildkit/go.mod | 75 (generated, vendored)

@@ -3,30 +3,30 @@ module github.com/moby/buildkit
 go 1.13
 
 require (
-	github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
 	github.com/BurntSushi/toml v0.3.1
-	github.com/Microsoft/go-winio v0.4.15
-	github.com/Microsoft/hcsshim v0.8.10
+	github.com/Microsoft/go-winio v0.4.17
+	github.com/Microsoft/hcsshim v0.8.16
 	github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
-	github.com/containerd/console v1.0.1
-	// containerd: the actual version is replaced in replace()
-	github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
-	github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
-	github.com/containerd/go-cni v1.0.1
-	github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
-	github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
-	github.com/containerd/typeurl v1.0.1
-	github.com/coreos/go-systemd/v22 v22.1.0
-	github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
+	github.com/containerd/console v1.0.2
+	github.com/containerd/containerd v1.5.2
+	github.com/containerd/continuity v0.1.0
+	github.com/containerd/fuse-overlayfs-snapshotter v1.0.2
+	github.com/containerd/go-cni v1.0.2
+	github.com/containerd/go-runc v1.0.0
+	github.com/containerd/stargz-snapshotter v0.6.4
+	github.com/containerd/typeurl v1.0.2
+	github.com/coreos/go-systemd/v22 v22.3.2
+	github.com/docker/cli v20.10.7+incompatible
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
+	// docker: the actual version is replaced in replace()
+	github.com/docker/docker v20.10.7+incompatible // master (v21.xx-dev)
 	github.com/docker/go-connections v0.4.0
 	github.com/gofrs/flock v0.7.3
-	github.com/gogo/googleapis v1.3.2
-	github.com/gogo/protobuf v1.3.1
+	github.com/gogo/googleapis v1.4.0
+	github.com/gogo/protobuf v1.3.2
 	// protobuf: the actual version is replaced in replace()
-	github.com/golang/protobuf v1.4.2
-	github.com/google/go-cmp v0.4.1
+	github.com/golang/protobuf v1.4.3
+	github.com/google/go-cmp v0.5.4
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
@@ -34,53 +34,50 @@ require (
 	github.com/hashicorp/go-immutable-radix v1.0.0
 	github.com/hashicorp/golang-lru v0.5.3
 	github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
-	github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
+	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
 	github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
 	github.com/mitchellh/hashstructure v1.0.0
 	github.com/moby/locker v1.0.1
-	github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
-	github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
-	github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
+	github.com/moby/sys/mount v0.2.0 // indirect
+	github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.1
-	github.com/opencontainers/runc v1.0.0-rc92
-	github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
+	github.com/opencontainers/runc v1.0.0-rc93
+	github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
 	github.com/opencontainers/selinux v1.8.0
 	github.com/opentracing-contrib/go-stdlib v1.0.0
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/profile v1.5.0
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
-	github.com/sirupsen/logrus v1.7.0
-	github.com/stretchr/testify v1.5.1
-	github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
+	github.com/sirupsen/logrus v1.8.1
+	github.com/stretchr/testify v1.7.0
+	github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
 	github.com/uber/jaeger-client-go v2.25.0+incompatible
 	github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
 	github.com/urfave/cli v1.22.2
 	go.etcd.io/bbolt v1.3.5
-	golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
-	golang.org/x/net v0.0.0-20200707034311-ab3426394381
-	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
+	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57
-	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
+	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
 	// genproto: the actual version is replaced in replace()
-	google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
-	google.golang.org/grpc v1.29.1
+	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a
+	// grpc: the actual version is replaced in replace()
+	google.golang.org/grpc v1.35.0
 )
 
 replace (
-	// containerd: Forked from 0edc412565dcc6e3d6125ff9e4b009ad4b89c638 (20201117) with:
-	// - `Adjust overlay tests to expect "index=off"` (#4719, for ease of cherry-picking #5076)
-	// - `overlay: support "userxattr" option (kernel 5.11)` (#5076)
-	// - `docker: avoid concurrent map access panic` (#4855)
-	github.com/containerd/containerd => github.com/AkihiroSuda/containerd v1.1.1-0.20210312044057-48f85a131bb8
-	github.com/docker/docker => github.com/cpuguy83/docker v0.0.0-20210406002447-5616f4544aef
+	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210609100121-ef4d47340142+incompatible
 	// protobuf: corresponds to containerd
 	github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
 	github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
 	github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
 	// genproto: corresponds to containerd
 	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 	// grpc: corresponds to protobuf
	google.golang.org/grpc => google.golang.org/grpc v1.30.0
 )
vendor/github.com/moby/buildkit/session/auth/auth.go | 12 (generated, vendored)

@@ -52,8 +52,8 @@ func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (session
 	}
 }
 
-func FetchToken(req *FetchTokenRequest, sm *session.Manager, g session.Group) (resp *FetchTokenResponse, err error) {
-	err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
+func FetchToken(ctx context.Context, req *FetchTokenRequest, sm *session.Manager, g session.Group) (resp *FetchTokenResponse, err error) {
+	err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error {
 		client := NewAuthClient(c.Conn())
 
 		resp, err = client.FetchToken(ctx, req)
@@ -68,9 +68,9 @@ func FetchToken(req *FetchTokenRequest, sm *session.Manager, g session.Group) (r
 	return resp, nil
 }
 
-func VerifyTokenAuthority(host string, pubKey *[32]byte, sm *session.Manager, g session.Group) (sessionID string, ok bool, err error) {
+func VerifyTokenAuthority(ctx context.Context, host string, pubKey *[32]byte, sm *session.Manager, g session.Group) (sessionID string, ok bool, err error) {
 	var verified bool
-	err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
+	err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error {
 		client := NewAuthClient(c.Conn())
 
 		payload := make([]byte, 32)
@@ -100,8 +100,8 @@ func VerifyTokenAuthority(host string, pubKey *[32]byte, sm *session.Manager, g
 	return sessionID, verified, nil
 }
 
-func GetTokenAuthority(host string, sm *session.Manager, g session.Group) (sessionID string, pubKey *[32]byte, err error) {
-	err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
+func GetTokenAuthority(ctx context.Context, host string, sm *session.Manager, g session.Group) (sessionID string, pubKey *[32]byte, err error) {
+	err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error {
 		client := NewAuthClient(c.Conn())
 
 		resp, err := client.GetTokenAuthority(ctx, &GetTokenAuthorityRequest{
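The three signature changes above thread the caller's context through `sm.Any` instead of cutting it off with `context.TODO()`, so cancellation and tracing metadata now reach the session round trip. A hypothetical caller sketch, assuming the vendored buildkit packages; `fetchRegistryToken` is an illustrative name:

```go
package authexample

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth"
)

// fetchRegistryToken passes its own ctx into FetchToken, so cancelling the
// build request also aborts the token exchange with the client session.
func fetchRegistryToken(ctx context.Context, sm *session.Manager, g session.Group, host string) (string, error) {
	resp, err := auth.FetchToken(ctx, &auth.FetchTokenRequest{Host: host}, sm, g)
	if err != nil {
		return "", err
	}
	return resp.Token, nil
}
```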
vendor/github.com/moby/buildkit/session/auth/auth.pb.go | 40 (generated, vendored)

@@ -1651,10 +1651,7 @@ func (m *CredentialsRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -1768,10 +1765,7 @@ func (m *CredentialsResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -1981,10 +1975,7 @@ func (m *FetchTokenRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -2104,10 +2095,7 @@ func (m *FetchTokenResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -2223,10 +2211,7 @@ func (m *GetTokenAuthorityRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -2310,10 +2295,7 @@ func (m *GetTokenAuthorityResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -2463,10 +2445,7 @@ func (m *VerifyTokenAuthorityRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
@@ -2550,10 +2529,7 @@ func (m *VerifyTokenAuthorityResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthAuth
 			}
 			if (iNdEx + skippy) > l {
vendor/github.com/moby/buildkit/session/filesync/diffcopy.go | 8 (generated, vendored)

@@ -70,7 +70,7 @@ func (wc *streamWriterCloser) Close() error {
 	return nil
 }
 
-func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
+func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) (err error) {
 	st := time.Now()
 	defer func() {
 		logrus.Debugf("diffcopy took: %v", time.Since(st))
@@ -82,6 +82,12 @@ func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress p
 		cf = cu.HandleChange
 		ch = cu.ContentHasher()
 	}
+	defer func() {
+		// tracing wrapper requires close trigger even on clean eof
+		if err == nil {
+			ds.CloseSend()
+		}
+	}()
 	return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
 		NotifyHashed:  cf,
 		ContentHasher: ch,
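Note why the first hunk switches to a named `err` return: the deferred closure added in the second hunk needs to observe whether the receive finished cleanly before calling `CloseSend`. A generic sketch of the same defer pattern, illustrative rather than buildkit code:

```go
package diffcopyexample

import "google.golang.org/grpc"

// recvAndClose mirrors the pattern above: the named err result is visible
// to the deferred closure, and CloseSend fires only on a clean return
// because the tracing wrapper needs an explicit close even on EOF.
func recvAndClose(ds grpc.ClientStream, recv func() error) (err error) {
	defer func() {
		if err == nil {
			_ = ds.CloseSend() // error intentionally ignored, as in the vendored code
		}
	}()
	return recv()
}
```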
vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go | 76 (generated, vendored)

@@ -8,6 +8,7 @@ import (
 	context "context"
 	fmt "fmt"
 	proto "github.com/gogo/protobuf/proto"
+	types "github.com/tonistiigi/fsutil/types"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
@@ -80,21 +81,25 @@ func init() {
 func init() { proto.RegisterFile("filesync.proto", fileDescriptor_d1042549f1f24495) }
 
 var fileDescriptor_d1042549f1f24495 = []byte{
-	// 217 bytes of a gzipped FileDescriptorProto
+	// 281 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
 	0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
-	0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
-	0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a,
-	0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83,
-	0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
-	0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
-	0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a,
-	0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0xb2, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6,
-	0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78,
-	0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7,
-	0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c,
-	0xc7, 0x10, 0xc5, 0x01, 0x33, 0x33, 0x89, 0x0d, 0x1c, 0x0d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0x5e, 0xce, 0x52, 0xb3, 0x98, 0x01, 0x00, 0x00,
+	0xd4, 0x83, 0x0b, 0x96, 0x19, 0x4a, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7,
+	0xe7, 0xea, 0x97, 0xe4, 0xe7, 0x65, 0x16, 0x97, 0x64, 0x66, 0xa6, 0x67, 0xea, 0xa7, 0x15, 0x97,
+	0x96, 0x64, 0xe6, 0xe8, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x97, 0x67, 0x16, 0xa5, 0x42, 0x0c,
+	0x50, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c, 0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f,
+	0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02,
+	0xb3, 0x8d, 0x9a, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, 0x2b, 0xf3, 0x92, 0x85, 0xac,
+	0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, 0x44, 0xf4, 0x20, 0xc6, 0xea,
+	0x81, 0x8d, 0xd5, 0x0b, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x91, 0xc2, 0x2a, 0xaa, 0xc1, 0x68, 0xc0,
+	0x28, 0x64, 0xcd, 0xc5, 0x19, 0x92, 0x58, 0x14, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xaa, 0x66,
+	0xa3, 0x28, 0xa8, 0x23, 0x52, 0xf3, 0x52, 0x84, 0xfc, 0x90, 0x1c, 0x21, 0xa7, 0x87, 0x1e, 0x06,
+	0x7a, 0xc8, 0x3e, 0x92, 0x22, 0x20, 0x0f, 0x32, 0xdb, 0xc9, 0xee, 0xc2, 0x43, 0x39, 0x86, 0x1b,
+	0x0f, 0xe5, 0x18, 0x3e, 0x3c, 0x94, 0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3,
+	0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c,
+	0xc3, 0x87, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1,
+	0x1c, 0x43, 0x14, 0x07, 0xcc, 0xcc, 0x24, 0x36, 0x70, 0x60, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff,
+	0xff, 0xe6, 0x17, 0x63, 0x59, 0x9f, 0x01, 0x00, 0x00,
 }
 
 func (this *BytesMessage) Equal(that interface{}) bool {
@@ -174,8 +179,8 @@ func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption)
 }
 
 type FileSync_DiffCopyClient interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
+	Send(*types.Packet) error
+	Recv() (*types.Packet, error)
 	grpc.ClientStream
 }
 
@@ -183,12 +188,12 @@ type fileSyncDiffCopyClient struct {
 	grpc.ClientStream
 }
 
-func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error {
+func (x *fileSyncDiffCopyClient) Send(m *types.Packet) error {
 	return x.ClientStream.SendMsg(m)
 }
 
-func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
+func (x *fileSyncDiffCopyClient) Recv() (*types.Packet, error) {
+	m := new(types.Packet)
 	if err := x.ClientStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
@@ -205,8 +210,8 @@ func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption)
 }
 
 type FileSync_TarStreamClient interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
+	Send(*types.Packet) error
+	Recv() (*types.Packet, error)
 	grpc.ClientStream
 }
 
@@ -214,12 +219,12 @@ type fileSyncTarStreamClient struct {
 	grpc.ClientStream
 }
 
-func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error {
+func (x *fileSyncTarStreamClient) Send(m *types.Packet) error {
 	return x.ClientStream.SendMsg(m)
 }
 
-func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
+func (x *fileSyncTarStreamClient) Recv() (*types.Packet, error) {
+	m := new(types.Packet)
 	if err := x.ClientStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
@@ -252,8 +257,8 @@ func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error
 }
 
 type FileSync_DiffCopyServer interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
+	Send(*types.Packet) error
+	Recv() (*types.Packet, error)
 	grpc.ServerStream
 }
 
@@ -261,12 +266,12 @@ type fileSyncDiffCopyServer struct {
 	grpc.ServerStream
 }
 
-func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error {
+func (x *fileSyncDiffCopyServer) Send(m *types.Packet) error {
 	return x.ServerStream.SendMsg(m)
 }
 
-func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
+func (x *fileSyncDiffCopyServer) Recv() (*types.Packet, error) {
+	m := new(types.Packet)
 	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
@@ -278,8 +283,8 @@ func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error
 }
 
 type FileSync_TarStreamServer interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
+	Send(*types.Packet) error
+	Recv() (*types.Packet, error)
 	grpc.ServerStream
 }
 
@@ -287,12 +292,12 @@ type fileSyncTarStreamServer struct {
 	grpc.ServerStream
 }
 
-func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error {
+func (x *fileSyncTarStreamServer) Send(m *types.Packet) error {
 	return x.ServerStream.SendMsg(m)
 }
 
-func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
+func (x *fileSyncTarStreamServer) Recv() (*types.Packet, error) {
+	m := new(types.Packet)
 	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
@@ -571,10 +576,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthFilesync
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthFilesync
 			}
 			if (iNdEx + skippy) > l {
vendor/github.com/moby/buildkit/session/filesync/filesync.proto | 8 (generated, vendored)

@@ -4,9 +4,11 @@ package moby.filesync.v1;
 
 option go_package = "filesync";
 
+import "github.com/tonistiigi/fsutil/types/wire.proto";
+
 service FileSync{
-	rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage);
-	rpc TarStream(stream BytesMessage) returns (stream BytesMessage);
+	rpc DiffCopy(stream fsutil.types.Packet) returns (stream fsutil.types.Packet);
+	rpc TarStream(stream fsutil.types.Packet) returns (stream fsutil.types.Packet);
 }
 
 service FileSend{
@@ -17,4 +19,4 @@ service FileSend{
 // BytesMessage contains a chunk of byte data
 message BytesMessage{
 	bytes data = 1;
-}
+}
vendor/github.com/moby/buildkit/session/filesync/generate.go | 2 (generated, vendored)

@@ -1,3 +1,3 @@
 package filesync
 
-//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto
+//go:generate protoc -I=. -I=../../vendor/ -I=../../vendor/github.com/tonistiigi/fsutil/types/ --gogoslick_out=plugins=grpc:. filesync.proto
vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go
generated
vendored
12
vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go
generated
vendored
|
@ -678,7 +678,7 @@ func (m *GetSecretRequest) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthSecrets
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
|
@ -695,10 +695,7 @@ func (m *GetSecretRequest) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthSecrets
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthSecrets
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
@ -782,10 +779,7 @@ func (m *GetSecretResponse) Unmarshal(dAtA []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthSecrets
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthSecrets
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
|
vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go | 15 (generated, vendored)

@@ -676,10 +676,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthSsh
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthSsh
 			}
 			if (iNdEx + skippy) > l {
@@ -761,10 +758,7 @@ func (m *CheckAgentRequest) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthSsh
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthSsh
 			}
 			if (iNdEx + skippy) > l {
@@ -814,10 +808,7 @@ func (m *CheckAgentResponse) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthSsh
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthSsh
 			}
 			if (iNdEx + skippy) > l {
vendor/github.com/moby/buildkit/session/upload/upload.pb.go | 5 (generated, vendored)

@@ -397,10 +397,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthUpload
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthUpload
 			}
 			if (iNdEx + skippy) > l {
vendor/github.com/moby/buildkit/solver/edge.go | 16 (generated, vendored)

@@ -939,6 +939,22 @@ func (e *edge) execOp(ctx context.Context) (interface{}, error) {
 	return NewCachedResult(res, ek), nil
 }
 
+func (e *edge) isDep(e2 *edge) bool {
+	return isDep(e.edge.Vertex, e2.edge.Vertex)
+}
+
+func isDep(vtx, vtx2 Vertex) bool {
+	if vtx.Digest() == vtx2.Digest() {
+		return true
+	}
+	for _, e := range vtx.Inputs() {
+		if isDep(e.Vertex, vtx2) {
+			return true
+		}
+	}
+	return false
+}
+
 func toResultSlice(cres []CachedResult) (out []Result) {
 	out = make([]Result, len(cres))
 	for i := range cres {
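The new `isDep` answers "is vtx2 the same vertex as vtx, or one of its transitive inputs" by comparing digests and recursing through the input edges. A toy reachability sketch with a stand-in `node` type (solver.Vertex reduced to just the two members the walk uses):

```go
package main

import "fmt"

// node stands in for solver.Vertex; only the digest and the input edges
// matter for the walk.
type node struct {
	digest string
	inputs []*node
}

// isDep reports whether target is reachable from v (or is v itself),
// mirroring the recursion in the vendored code.
func isDep(v, target *node) bool {
	if v.digest == target.digest {
		return true
	}
	for _, in := range v.inputs {
		if isDep(in, target) {
			return true
		}
	}
	return false
}

func main() {
	base := &node{digest: "sha256:base"}
	mid := &node{digest: "sha256:mid", inputs: []*node{base}}
	top := &node{digest: "sha256:top", inputs: []*node{mid}}
	fmt.Println(isDep(top, base)) // true: base is a transitive input of top
	fmt.Println(isDep(base, top)) // false: the walk only descends inputs
}
```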
vendor/github.com/moby/buildkit/solver/jobs.go | 10 (generated, vendored)

@@ -640,13 +640,16 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF
 	}
 	if err := s.slowCacheErr[index]; err != nil {
 		s.slowMu.Unlock()
-		return err, nil
+		return nil, err
 	}
 	s.slowMu.Unlock()
 
 	complete := true
 	if p != nil {
 		st := s.st.solver.getState(s.st.vtx.Inputs()[index])
+		if st == nil {
+			return nil, errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name())
+		}
 		ctx2 := opentracing.ContextWithSpan(progress.WithProgress(ctx, st.mpw), st.mspan)
 		err = p(ctx2, res, st)
 		if err != nil {
@@ -763,6 +766,11 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result,
 	if s.execRes != nil || s.execErr != nil {
 		return s.execRes, s.execErr
 	}
+	release, err := op.Acquire(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "acquire op resources")
+	}
+	defer release()
 
 	ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
 	ctx = withAncestorCacheOpts(ctx, s.st)
vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go | 10 (generated, vendored)

@@ -67,7 +67,7 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho
 }
 
 func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error {
-	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
+	p, err := fs.RootPath(d, filepath.Join("/", action.Path))
 	if err != nil {
 		return err
 	}
@@ -100,7 +100,7 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.
 }
 
 func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.User, idmap *idtools.IdentityMapping) error {
-	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
+	p, err := fs.RootPath(d, filepath.Join("/", action.Path))
 	if err != nil {
 		return err
 	}
@@ -146,7 +146,7 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error {
 }
 
 func rmPath(root, src string, allowNotFound bool) error {
-	p, err := fs.RootPath(root, filepath.Join(filepath.Join("/", src)))
+	p, err := fs.RootPath(root, filepath.Join("/", src))
 	if err != nil {
 		return err
 	}
@@ -166,7 +166,7 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 	destPath := cleanPath(action.Dest)
 
 	if !action.CreateDestPath {
-		p, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest)))
+		p, err := fs.RootPath(dest, filepath.Join("/", action.Dest))
 		if err != nil {
 			return err
 		}
@@ -187,6 +187,8 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
 
 	opt := []copy.Opt{
 		func(ci *copy.CopyInfo) {
+			ci.IncludePatterns = action.IncludePatterns
+			ci.ExcludePatterns = action.ExcludePatterns
 			ci.Chown = ch
 			ci.Utime = timestampToTime(action.Timestamp)
 			if m := int(action.Mode); m != -1 {
vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go | 5 (generated, vendored)

@@ -141,3 +141,8 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res
 
 	return []solver.Result{r}, err
 }
+
+func (b *buildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) {
+	// buildOp itself does not count towards parallelism budget.
+	return func() {}, nil
+}
vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go | 49 (generated, vendored)

@@ -29,33 +29,36 @@ import (
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sync/semaphore"
 )
 
 const execCacheType = "buildkit.exec.v0"
 
 type execOp struct {
-	op        *pb.ExecOp
-	cm        cache.Manager
-	mm        *mounts.MountManager
-	exec      executor.Executor
-	w         worker.Worker
-	platform  *pb.Platform
-	numInputs int
+	op          *pb.ExecOp
+	cm          cache.Manager
+	mm          *mounts.MountManager
+	exec        executor.Executor
+	w           worker.Worker
+	platform    *pb.Platform
+	numInputs   int
+	parallelism *semaphore.Weighted
 }
 
-func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
+func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
 	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
 		return nil, err
 	}
 	name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " "))
 	return &execOp{
-		op:        op.Exec,
-		mm:        mounts.NewMountManager(name, cm, sm, md),
-		cm:        cm,
-		exec:      exec,
-		numInputs: len(v.Inputs()),
-		w:         w,
-		platform:  platform,
+		op:          op.Exec,
+		mm:          mounts.NewMountManager(name, cm, sm, md),
+		cm:          cm,
+		exec:        exec,
+		numInputs:   len(v.Inputs()),
+		w:           w,
+		platform:    platform,
+		parallelism: parallelism,
 	}, nil
 }
 
@@ -368,6 +371,9 @@ func proxyEnvList(p *pb.ProxyEnv) []string {
 	if v := p.NoProxy; v != "" {
 		out = append(out, "NO_PROXY="+v, "no_proxy="+v)
 	}
+	if v := p.AllProxy; v != "" {
+		out = append(out, "ALL_PROXY="+v, "all_proxy="+v)
+	}
 	return out
 }
 
@@ -385,3 +391,16 @@ func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) {
 	}
 	return out, nil
 }
+
+func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) {
+	if e.parallelism == nil {
+		return func() {}, nil
+	}
+	err := e.parallelism.Acquire(ctx, 1)
+	if err != nil {
+		return nil, err
+	}
+	return func() {
+		e.parallelism.Release(1)
+	}, nil
+}
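The exec, file, and source ops all gain the same `Acquire` implementation backed by `golang.org/x/sync/semaphore`, with a nil semaphore meaning "unbounded". A self-contained sketch of the pattern and of how such a weighted semaphore would be constructed; the `acquire` helper is illustrative, not buildkit's API:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// acquire mirrors the op Acquire methods above: nil means no limit,
// otherwise block for one slot and hand back a release func.
func acquire(ctx context.Context, sem *semaphore.Weighted) (func(), error) {
	if sem == nil {
		return func() {}, nil
	}
	if err := sem.Acquire(ctx, 1); err != nil {
		return nil, err // ctx cancelled while waiting for a slot
	}
	return func() { sem.Release(1) }, nil
}

func main() {
	sem := semaphore.NewWeighted(2) // e.g. at most two ops executing at once
	ctx := context.Background()
	for i := 0; i < 3; i++ {
		release, err := acquire(ctx, sem)
		if err != nil {
			panic(err)
		}
		fmt.Println("running op", i)
		release() // give the slot back so the next op can proceed
	}
}
```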
vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go | 79 (generated, vendored)

@@ -24,33 +24,36 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
+	"golang.org/x/sync/semaphore"
 )
 
 const fileCacheType = "buildkit.file.v0"
 
 type fileOp struct {
-	op        *pb.FileOp
-	md        *metadata.Store
-	w         worker.Worker
-	solver    *FileOpSolver
-	numInputs int
+	op          *pb.FileOp
+	md          *metadata.Store
+	w           worker.Worker
+	solver      *FileOpSolver
+	numInputs   int
+	parallelism *semaphore.Weighted
 }
 
-func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) {
+func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, md *metadata.Store, w worker.Worker) (solver.Op, error) {
 	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
 		return nil, err
 	}
 	return &fileOp{
-		op:        op.File,
-		md:        md,
-		numInputs: len(v.Inputs()),
-		w:         w,
-		solver:    NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm)),
+		op:          op.File,
+		md:          md,
+		numInputs:   len(v.Inputs()),
+		w:           w,
+		solver:      NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm)),
+		parallelism: parallelism,
 	}, nil
 }
 
 func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) {
-	selectors := map[int]map[llbsolver.Selector]struct{}{}
+	selectors := map[int][]llbsolver.Selector{}
 	invalidSelectors := map[int]struct{}{}
 
 	actions := make([][]byte, 0, len(f.op.Actions))
@@ -95,7 +98,7 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
 			markInvalid(action.Input)
 			processOwner(p.Owner, selectors)
 			if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs {
-				addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink)
+				addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink, p.IncludePatterns, p.ExcludePatterns)
 				p.Src = path.Base(p.Src)
 			}
 			dt, err = json.Marshal(p)
@@ -139,7 +142,7 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
 			continue
 		}
 		dgsts := make([][]byte, 0, len(m))
-		for k := range m {
+		for _, k := range m {
 			dgsts = append(dgsts, []byte(k.Path))
 		}
 		sort.Slice(dgsts, func(i, j int) bool {
@@ -179,21 +182,29 @@ func (f *fileOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
 	return outResults, nil
 }
 
-func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard, followLinks bool) {
-	mm, ok := m[idx]
-	if !ok {
-		mm = map[llbsolver.Selector]struct{}{}
-		m[idx] = mm
-	}
-	s := llbsolver.Selector{Path: sel}
-
-	if wildcard && containsWildcards(sel) {
-		s.Wildcard = true
-	}
-	if followLinks {
-		s.FollowLinks = true
-	}
-	mm[s] = struct{}{}
+func (f *fileOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) {
+	if f.parallelism == nil {
+		return func() {}, nil
+	}
+	err := f.parallelism.Acquire(ctx, 1)
+	if err != nil {
+		return nil, err
+	}
+	return func() {
+		f.parallelism.Release(1)
+	}, nil
+}
+
+func addSelector(m map[int][]llbsolver.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) {
+	s := llbsolver.Selector{
+		Path:            sel,
+		FollowLinks:     followLinks,
+		Wildcard:        wildcard && containsWildcards(sel),
+		IncludePatterns: includePatterns,
+		ExcludePatterns: excludePatterns,
+	}
+
+	m[idx] = append(m[idx], s)
 }
 
 func containsWildcards(name string) bool {
@@ -209,11 +220,11 @@ func containsWildcards(name string) bool {
 	return false
 }
 
-func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
+func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector {
 	paths := make([]string, 0, len(m))
 	pathsFollow := make([]string, 0, len(m))
-	for sel := range m {
-		if !sel.Wildcard {
+	for _, sel := range m {
+		if !sel.HasWildcardOrFilters() {
 			if sel.FollowLinks {
 				pathsFollow = append(pathsFollow, sel.Path)
 			} else {
@@ -232,8 +243,8 @@ func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
 		selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true})
 	}
 
-	for sel := range m {
-		if sel.Wildcard {
+	for _, sel := range m {
+		if sel.HasWildcardOrFilters() {
 			selectors = append(selectors, sel)
 		}
 	}
@@ -245,7 +256,7 @@ func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
 	return selectors
 }
 
-func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]struct{}) error {
+func processOwner(chopt *pb.ChownOpt, selectors map[int][]llbsolver.Selector) error {
 	if chopt == nil {
 		return nil
 	}
@@ -254,7 +265,7 @@ func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]s
 		if u.ByName.Input < 0 {
 			return errors.Errorf("invalid user index %d", u.ByName.Input)
 		}
-		addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true)
+		addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true, nil, nil)
 	}
 }
 if chopt.Group != nil {
@@ -262,7 +273,7 @@ func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]s
 		if u.ByName.Input < 0 {
 			return errors.Errorf("invalid user index %d", u.ByName.Input)
 		}
-		addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true)
+		addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true, nil, nil)
 	}
 }
 return nil
vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go | 46 (generated, vendored)

@@ -12,32 +12,35 @@ import (
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/worker"
 	digest "github.com/opencontainers/go-digest"
+	"golang.org/x/sync/semaphore"
 )
 
 const sourceCacheType = "buildkit.source.v0"
 
 type sourceOp struct {
-	mu       sync.Mutex
-	op       *pb.Op_Source
-	platform *pb.Platform
-	sm       *source.Manager
-	src      source.SourceInstance
-	sessM    *session.Manager
-	w        worker.Worker
-	vtx      solver.Vertex
+	mu          sync.Mutex
+	op          *pb.Op_Source
+	platform    *pb.Platform
+	sm          *source.Manager
+	src         source.SourceInstance
+	sessM       *session.Manager
+	w           worker.Worker
+	vtx         solver.Vertex
+	parallelism *semaphore.Weighted
 }
 
-func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) {
+func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (solver.Op, error) {
 	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
 		return nil, err
 	}
 	return &sourceOp{
-		op:       op,
-		sm:       sm,
-		w:        w,
-		sessM:    sessM,
-		platform: platform,
-		vtx:      vtx,
+		op:          op,
+		sm:          sm,
+		w:           w,
+		sessM:       sessM,
+		platform:    platform,
+		vtx:         vtx,
+		parallelism: parallelism,
 	}, nil
 }
 
@@ -93,3 +96,16 @@ func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result)
 	}
 	return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil
 }
+
+func (s *sourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) {
+	if s.parallelism == nil {
+		return func() {}, nil
+	}
+	err := s.parallelism.Acquire(ctx, 1)
+	if err != nil {
+		return nil, err
+	}
+	return func() {
+		s.parallelism.Release(1)
+	}, nil
+}
vendor/github.com/moby/buildkit/solver/llbsolver/result.go | 37 (generated, vendored)

@@ -16,9 +16,15 @@ import (
 )
 
 type Selector struct {
-	Path        string
-	Wildcard    bool
-	FollowLinks bool
+	Path            string
+	Wildcard        bool
+	FollowLinks     bool
+	IncludePatterns []string
+	ExcludePatterns []string
+}
+
+func (sel Selector) HasWildcardOrFilters() bool {
+	return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0
 }
 
 func UnlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error {
@@ -50,19 +56,20 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc {
 	for i, sel := range selectors {
 		i, sel := i, sel
 		eg.Go(func() error {
-			if !sel.Wildcard {
-				dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks, s)
-				if err != nil {
-					return err
-				}
-				dgsts[i] = []byte(dgst)
-			} else {
-				dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks, s)
-				if err != nil {
-					return err
-				}
-				dgsts[i] = []byte(dgst)
-			}
+			dgst, err := contenthash.Checksum(
+				ctx, ref.ImmutableRef, path.Join("/", sel.Path),
+				contenthash.ChecksumOpts{
+					Wildcard:        sel.Wildcard,
+					FollowLinks:     sel.FollowLinks,
+					IncludePatterns: sel.IncludePatterns,
+					ExcludePatterns: sel.ExcludePatterns,
+				},
+				s,
+			)
+			if err != nil {
+				return err
+			}
+			dgsts[i] = []byte(dgst)
 			return nil
 		})
 	}
vendor/github.com/moby/buildkit/solver/pb/caps.go | 18 (generated, vendored)

@@ -25,6 +25,7 @@ const (
 	CapSourceGitHTTPAuth      apicaps.CapID = "source.git.httpauth"
 	CapSourceGitKnownSSHHosts apicaps.CapID = "source.git.knownsshhosts"
 	CapSourceGitMountSSHSock  apicaps.CapID = "source.git.mountsshsock"
+	CapSourceGitSubdir        apicaps.CapID = "source.git.subdir"
 
 	CapSourceHTTP         apicaps.CapID = "source.http"
 	CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum"
@@ -50,8 +51,9 @@ const (
 
 	CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1"
 
-	CapFileBase       apicaps.CapID = "file.base"
-	CapFileRmWildcard apicaps.CapID = "file.rm.wildcard"
+	CapFileBase                       apicaps.CapID = "file.base"
+	CapFileRmWildcard                 apicaps.CapID = "file.rm.wildcard"
+	CapFileCopyIncludeExcludePatterns apicaps.CapID = "file.copy.includeexcludepatterns"
 
 	CapConstraints apicaps.CapID = "constraints"
 	CapPlatform    apicaps.CapID = "platform"
@@ -152,6 +154,12 @@ func init() {
 		Status:  apicaps.CapStatusExperimental,
 	})
 
+	Caps.Init(apicaps.Cap{
+		ID:      CapSourceGitSubdir,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
 	Caps.Init(apicaps.Cap{
 		ID:      CapSourceHTTP,
 		Enabled: true,
@@ -288,6 +296,12 @@ func init() {
 		Status:  apicaps.CapStatusExperimental,
 	})
 
+	Caps.Init(apicaps.Cap{
+		ID:      CapFileCopyIncludeExcludePatterns,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
 	Caps.Init(apicaps.Cap{
 		ID:      CapConstraints,
 		Enabled: true,
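`CapFileCopyIncludeExcludePatterns` is registered so that clients can probe for it before emitting the new `FileActionCopy` fields. A hypothetical frontend-side gate; the helper name is illustrative, while `apicaps.CapSet.Supports` is the real probe:

```go
package capsexample

import (
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
)

// supportsCopyFilters reports whether the connected worker advertises the
// new capability; a frontend would only set IncludePatterns/ExcludePatterns
// on a FileActionCopy when this returns true, so older workers are never
// sent fields they would silently ignore.
func supportsCopyFilters(caps apicaps.CapSet) bool {
	return caps.Supports(pb.CapFileCopyIncludeExcludePatterns) == nil
}
```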
625
vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
generated
vendored
625
vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
generated
vendored
|
@ -1409,6 +1409,7 @@ type ProxyEnv struct {
|
|||
HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"`
|
||||
FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"`
|
||||
NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"`
|
||||
AllProxy string `protobuf:"bytes,5,opt,name=all_proxy,json=allProxy,proto3" json:"all_proxy,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProxyEnv) Reset() { *m = ProxyEnv{} }
|
||||
|
@ -1468,6 +1469,13 @@ func (m *ProxyEnv) GetNoProxy() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (m *ProxyEnv) GetAllProxy() string {
|
||||
if m != nil {
|
||||
return m.AllProxy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// WorkerConstraints defines conditions for the worker
|
||||
type WorkerConstraints struct {
|
||||
Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"`
|
||||
|
@ -1791,6 +1799,10 @@ type FileActionCopy struct {
|
|||
AllowEmptyWildcard bool `protobuf:"varint,10,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"`
|
||||
// optional created time override
|
||||
Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// include only files/dirs matching at least one of these patterns
|
||||
IncludePatterns []string `protobuf:"bytes,12,rep,name=include_patterns,json=includePatterns,proto3" json:"include_patterns,omitempty"`
|
||||
// exclude files/dir matching any of these patterns (even if they match an include pattern)
|
||||
ExcludePatterns []string `protobuf:"bytes,13,rep,name=exclude_patterns,json=excludePatterns,proto3" json:"exclude_patterns,omitempty"`
|
||||
}
|
||||
|
||||
func (m *FileActionCopy) Reset() { *m = FileActionCopy{} }
|
||||
|
@ -1899,6 +1911,20 @@ func (m *FileActionCopy) GetTimestamp() int64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *FileActionCopy) GetIncludePatterns() []string {
|
||||
if m != nil {
|
||||
return m.IncludePatterns
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FileActionCopy) GetExcludePatterns() []string {
|
||||
if m != nil {
|
||||
return m.ExcludePatterns
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FileActionMkFile struct {
|
||||
// path for the new file
|
||||
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
|
||||
|
@ -2332,146 +2358,149 @@ func init() {
|
|||
func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
|
||||
|
||||
var fileDescriptor_8de16154b2733812 = []byte{
|
// 2217 bytes of a gzipped FileDescriptorProto
// 2267 bytes of a gzipped FileDescriptorProto
// (opaque gzipped descriptor data not reproduced here)
}

func (m *Op) Marshal() (dAtA []byte, err error) {
@@ -3726,6 +3755,13 @@ func (m *ProxyEnv) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.AllProxy) > 0 {
i -= len(m.AllProxy)
copy(dAtA[i:], m.AllProxy)
i = encodeVarintOps(dAtA, i, uint64(len(m.AllProxy)))
i--
dAtA[i] = 0x2a
}
if len(m.NoProxy) > 0 {
i -= len(m.NoProxy)
copy(dAtA[i:], m.NoProxy)
@@ -4087,6 +4123,24 @@ func (m *FileActionCopy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.ExcludePatterns) > 0 {
for iNdEx := len(m.ExcludePatterns) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.ExcludePatterns[iNdEx])
copy(dAtA[i:], m.ExcludePatterns[iNdEx])
i = encodeVarintOps(dAtA, i, uint64(len(m.ExcludePatterns[iNdEx])))
i--
dAtA[i] = 0x6a
}
}
if len(m.IncludePatterns) > 0 {
for iNdEx := len(m.IncludePatterns) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.IncludePatterns[iNdEx])
copy(dAtA[i:], m.IncludePatterns[iNdEx])
i = encodeVarintOps(dAtA, i, uint64(len(m.IncludePatterns[iNdEx])))
i--
dAtA[i] = 0x62
}
}
if m.Timestamp != 0 {
i = encodeVarintOps(dAtA, i, uint64(m.Timestamp))
i--
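The magic bytes 0x62 and 0x6a written above are protobuf field tags. A small illustrative snippet (hypothetical helper, not part of the generated file) showing how they derive from the new field numbers 12 and 13 with wire type 2 (length-delimited):

package main

import "fmt"

// tagByte computes a protobuf tag byte: (field_number << 3) | wire_type.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("0x%x\n", tagByte(12, 2)) // 0x62 -> include_patterns
	fmt.Printf("0x%x\n", tagByte(13, 2)) // 0x6a -> exclude_patterns
}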
@@ -5061,6 +5115,10 @@ func (m *ProxyEnv) Size() (n int) {
if l > 0 {
n += 1 + l + sovOps(uint64(l))
}
l = len(m.AllProxy)
if l > 0 {
n += 1 + l + sovOps(uint64(l))
}
return n
}

@@ -5250,6 +5308,18 @@ func (m *FileActionCopy) Size() (n int) {
if m.Timestamp != 0 {
n += 1 + sovOps(uint64(m.Timestamp))
}
if len(m.IncludePatterns) > 0 {
for _, s := range m.IncludePatterns {
l = len(s)
n += 1 + l + sovOps(uint64(l))
}
}
if len(m.ExcludePatterns) > 0 {
for _, s := range m.ExcludePatterns {
l = len(s)
n += 1 + l + sovOps(uint64(l))
}
}
return n
}

@@ -5678,10 +5748,7 @@ func (m *Op) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
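This hunk (repeated for every message type below) collapses two separate guards into a single `(skippy < 0) || (iNdEx+skippy) < 0` check. A minimal standalone sketch (not BuildKit code; names are illustrative) of the overflow case the combined guard covers:

package main

import (
	"fmt"
	"math"
)

func main() {
	// With a huge, possibly attacker-controlled skip length, the sum
	// wraps negative on signed 64-bit ints, so both conditions matter:
	// a non-negative skippy can still produce a negative index sum.
	var iNdEx, skippy int64 = 10, math.MaxInt64 - 5
	fmt.Println(skippy < 0)       // false
	fmt.Println(iNdEx+skippy < 0) // true: wrapped sum is caught
}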
@@ -5891,10 +5958,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -5995,10 +6059,7 @@ func (m *Input) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -6156,10 +6217,7 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -6439,10 +6497,7 @@ func (m *Meta) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -6773,10 +6828,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -6877,10 +6929,7 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -7039,10 +7088,7 @@ func (m *SecretOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -7201,10 +7247,7 @@ func (m *SSHOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -7396,7 +7439,7 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -7413,10 +7456,7 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -7597,7 +7637,7 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -7760,7 +7800,7 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -7777,10 +7817,7 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -7849,10 +7886,7 @@ func (m *BuildInput) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8032,7 +8066,7 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -8183,7 +8217,7 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -8200,10 +8234,7 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -8365,7 +8396,7 @@ func (m *Source) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -8416,10 +8447,7 @@ func (m *Source) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8503,10 +8531,7 @@ func (m *Locations) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8658,10 +8683,7 @@ func (m *SourceInfo) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8764,10 +8786,7 @@ func (m *Location) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8883,10 +8902,7 @@ func (m *Range) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -8974,10 +8990,7 @@ func (m *Position) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -9047,10 +9060,7 @@ func (m *ExportCache) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -9222,16 +9232,45 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error {
}
m.NoProxy = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AllProxy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthOps
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AllProxy = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
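The shift loop in the new case 5 above (and in the FileActionCopy cases further down) is a standard protobuf varint decoder. A self-contained sketch of the same loop with illustrative names; the generated code returns ErrIntOverflowOps where this sketch returns an error for shifts past 63:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint: each byte carries 7 payload
// bits and the high bit flags continuation.
func decodeVarint(b []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); ; i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if i >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		v |= uint64(b[i]&0x7F) << shift
		if b[i] < 0x80 {
			return v, i + 1, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}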
@@ -9313,10 +9352,7 @@ func (m *WorkerConstraints) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -9510,7 +9546,7 @@ func (m *Definition) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > postIndex {

@@ -9563,10 +9599,7 @@ func (m *Definition) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -9680,10 +9713,7 @@ func (m *HostIP) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -9767,10 +9797,7 @@ func (m *FileOp) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -10017,10 +10044,7 @@ func (m *FileAction) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -10322,16 +10346,77 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error {
break
}
}
case 12:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IncludePatterns", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthOps
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IncludePatterns = append(m.IncludePatterns, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExcludePatterns", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthOps
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExcludePatterns = append(m.ExcludePatterns, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
@@ -10521,10 +10606,7 @@ func (m *FileActionMkFile) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -10700,10 +10782,7 @@ func (m *FileActionMkDir) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -10825,10 +10904,7 @@ func (m *FileActionRm) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -10950,10 +11026,7 @@ func (m *ChownOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -11058,10 +11131,7 @@ func (m *UserOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {

@@ -11162,10 +11232,7 @@ func (m *NamedUserOpt) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
vendor/github.com/moby/buildkit/solver/pb/ops.proto (5 changes, generated, vendored)
@@ -224,6 +224,7 @@ message ProxyEnv {
string https_proxy = 2;
string ftp_proxy = 3;
string no_proxy = 4;
string all_proxy = 5;
}

// WorkerConstraints defines conditions for the worker
@@ -290,6 +291,10 @@ message FileActionCopy {
bool allowEmptyWildcard = 10;
// optional created time override
int64 timestamp = 11;
// include only files/dirs matching at least one of these patterns
repeated string include_patterns = 12;
// exclude files/dirs matching any of these patterns (even if they match an include pattern)
repeated string exclude_patterns = 13;
}

message FileActionMkFile {
vendor/github.com/moby/buildkit/solver/scheduler.go (10 changes, generated, vendored)
@@ -170,9 +170,13 @@ postUnpark:
// skip this if not at least 1 key per dep
origEdge := e.index.LoadOrStore(k, e)
if origEdge != nil {
logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name())
if s.mergeTo(origEdge, e) {
s.ef.setEdge(e.edge, origEdge)
if e.isDep(origEdge) || origEdge.isDep(e) {
logrus.Debugf("skip merge due to dependency")
} else {
logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name())
if s.mergeTo(origEdge, e) {
s.ef.setEdge(e.edge, origEdge)
}
}
}
}
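The new isDep guard above skips merging two equivalent edges when one is a transitive dependency of the other, which would otherwise fold an edge into its own dependency chain. A toy illustration (not the solver's real types) of why that check matters:

package main

import "fmt"

type edge struct {
	name string
	deps []*edge
}

// isDep reports whether o is reachable through e's dependencies.
func (e *edge) isDep(o *edge) bool {
	for _, d := range e.deps {
		if d == o || d.isDep(o) {
			return true
		}
	}
	return false
}

func main() {
	a := &edge{name: "a"}
	b := &edge{name: "b", deps: []*edge{a}}
	// b depends on a, so merging them would create a cycle: skip.
	fmt.Println(b.isDep(a), a.isDep(b)) // true false
}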
vendor/github.com/moby/buildkit/solver/types.go (5 changes, generated, vendored)
@@ -135,6 +135,8 @@ type CacheLink struct {
Selector digest.Digest `json:",omitempty"`
}

type ReleaseFunc func()

// Op defines how the solver can evaluate the properties of a vertex operation.
// An op is executed in the worker, and is retrieved from the vertex by the
// value of `vertex.Sys()`. The solver is configured with a resolve function to

@@ -146,6 +148,9 @@ type Op interface {

// Exec runs an operation given results from previous operations.
Exec(ctx context.Context, g session.Group, inputs []Result) (outputs []Result, err error)

// Acquire acquires the necessary resources to execute the `Op`.
Acquire(ctx context.Context) (release ReleaseFunc, err error)
}

type ResultBasedCacheFunc func(context.Context, Result, session.Group) (digest.Digest, error)
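One plausible shape for the new Acquire contract above, assuming a weighted semaphore caps op parallelism; this is a hedged sketch, not BuildKit's actual implementation:

package main

import (
	"context"

	"golang.org/x/sync/semaphore"
)

type ReleaseFunc func()

// throttledOp is a hypothetical Op that limits concurrent execution.
type throttledOp struct {
	parallelism *semaphore.Weighted
}

func (op *throttledOp) Acquire(ctx context.Context) (ReleaseFunc, error) {
	if op.parallelism == nil {
		return func() {}, nil // no limit configured
	}
	if err := op.parallelism.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { op.parallelism.Release(1) }, nil
}

func main() {
	op := &throttledOp{parallelism: semaphore.NewWeighted(2)}
	release, err := op.Acquire(context.Background())
	if err == nil {
		defer release()
	}
}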
vendor/github.com/moby/buildkit/source/git/gitsource.go (74 changes, generated, vendored)
@@ -11,6 +11,7 @@ import (
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"strconv"
@@ -75,7 +76,7 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri

sis, err := gs.md.Search(remoteKey)
if err != nil {
return "", nil, errors.Wrapf(err, "failed to search metadata for %s", remote)
return "", nil, errors.Wrapf(err, "failed to search metadata for %s", redactCredentials(remote))
}

var remoteRef cache.MutableRef
@@ -84,19 +85,19 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri
if err != nil {
if errors.Is(err, cache.ErrLocked) {
// should never really happen as no other function should access this metadata, but let's be graceful
logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err)
logrus.Warnf("mutable ref for %s %s was locked: %v", redactCredentials(remote), si.ID(), err)
continue
}
return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", remote)
return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", redactCredentials(remote))
}
break
}

initializeRepo := false
if remoteRef == nil {
remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote)))
remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", redactCredentials(remote))))
if err != nil {
return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote)
return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", redactCredentials(remote))
}
initializeRepo = true
}
@@ -140,10 +141,10 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri
// same new remote metadata
si, _ := gs.md.Get(remoteRef.ID())
v, err := metadata.NewValue(remoteKey)
v.Index = remoteKey
if err != nil {
return "", nil, err
}
v.Index = remoteKey

if err := si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, "git-remote", v)
@@ -170,6 +171,9 @@ func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
if gs.src.KeepGitDir {
key += ".git"
}
if gs.src.Subdir != "" {
key += ":" + gs.src.Subdir
}
return key
}

@@ -348,7 +352,7 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index

buf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "ls-remote", "origin", ref)
if err != nil {
return "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", remote)
return "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(remote))
}
out := buf.String()
idx := strings.Index(out, "\t")
@@ -450,13 +454,13 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out
// TODO: is there a better way to do this?
}
if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, args...); err != nil {
return nil, errors.Wrapf(err, "failed to fetch remote %s", gs.src.Remote)
return nil, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(gs.src.Remote))
}
}

checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote)
return nil, errors.Wrapf(err, "failed to create new mutable for %s", redactCredentials(gs.src.Remote))
}

defer func() {
@@ -480,7 +484,12 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out
}
}()

if gs.src.KeepGitDir {
subdir := path.Clean(gs.src.Subdir)
if subdir == "/" {
subdir = "."
}

if gs.src.KeepGitDir && subdir == "." {
checkoutDirGit := filepath.Join(checkoutDir, ".git")
if err := os.MkdirAll(checkoutDir, 0711); err != nil {
return nil, err
@@ -509,19 +518,53 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out
}
_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, sock, knownHosts, nil, "checkout", "FETCH_HEAD")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote)
return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote))
}
gitDir = checkoutDirGit
} else {
_, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, nil, "checkout", ref, "--", ".")
cd := checkoutDir
if subdir != "." {
cd, err = ioutil.TempDir(cd, "checkout")
if err != nil {
return nil, errors.Wrapf(err, "failed to create temporary checkout dir")
}
}
_, err = gitWithinDir(ctx, gitDir, cd, sock, knownHosts, nil, "checkout", ref, "--", ".")
if err != nil {
return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote)
return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote))
}
if subdir != "." {
d, err := os.Open(filepath.Join(cd, subdir))
if err != nil {
return nil, errors.Wrapf(err, "failed to open subdir %v", subdir)
}
defer func() {
if d != nil {
d.Close()
}
}()
names, err := d.Readdirnames(0)
if err != nil {
return nil, err
}
for _, n := range names {
if err := os.Rename(filepath.Join(cd, subdir, n), filepath.Join(checkoutDir, n)); err != nil {
return nil, err
}
}
if err := d.Close(); err != nil {
return nil, err
}
d = nil // reset defer
if err := os.RemoveAll(cd); err != nil {
return nil, err
}
}
}

_, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, gs.auth, "submodule", "update", "--init", "--recursive", "--depth=1")
if err != nil {
return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote)
return nil, errors.Wrapf(err, "failed to update submodules for %s", redactCredentials(gs.src.Remote))
}

if idmap := mount.IdentityMapping(); idmap != nil {
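The checkout-into-a-temp-dir-then-promote dance added above reduces to a few filesystem steps. A simplified standalone sketch (illustrative function and paths only, without the defer/cleanup bookkeeping of the real code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// promoteSubdir moves the entries of tmp/subdir up into checkoutDir,
// then removes the temporary checkout.
func promoteSubdir(checkoutDir, tmp, subdir string) error {
	entries, err := os.ReadDir(filepath.Join(tmp, subdir))
	if err != nil {
		return err
	}
	for _, e := range entries {
		if err := os.Rename(filepath.Join(tmp, subdir, e.Name()), filepath.Join(checkoutDir, e.Name())); err != nil {
			return err
		}
	}
	return os.RemoveAll(tmp)
}

func main() {
	// Hypothetical paths, for illustration only.
	if err := promoteSubdir("/tmp/checkout", "/tmp/checkout/checkout123", "docs"); err != nil {
		fmt.Println(err)
	}
}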
@@ -551,10 +594,11 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out

si, _ := gs.md.Get(snap.ID())
v, err := metadata.NewValue(snapshotKey)
v.Index = snapshotKey
if err != nil {
return nil, err
}
v.Index = snapshotKey

if err := si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, "git-snapshot", v)
}); err != nil {
vendor/github.com/moby/buildkit/source/git/redact_credentials.go (17 changes, generated, vendored, new file)
@ -0,0 +1,17 @@
|
|||
// +build go1.15
|
||||
|
||||
package git
|
||||
|
||||
import "net/url"
|
||||
|
||||
// redactCredentials takes a URL and redacts a password from it.
|
||||
// e.g. "https://user:password@github.com/user/private-repo-failure.git" will be changed to
|
||||
// "https://user:xxxxx@github.com/user/private-repo-failure.git"
|
||||
func redactCredentials(s string) string {
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
return s // string is not a URL, just return it
|
||||
}
|
||||
|
||||
return u.Redacted()
|
||||
}
|
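
A minimal sketch of what this helper does in practice (values hypothetical; only the standard library's url.Redacted, Go 1.15+, is used):

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		u, _ := url.Parse("https://user:s3cret@github.com/user/private-repo.git")
		// Redacted masks only the password component; username and host stay visible.
		fmt.Println(u.Redacted()) // https://user:xxxxx@github.com/user/private-repo.git
	}

This is why the error-wrapping calls above can safely include gs.src.Remote: any embedded password is masked before the URL reaches logs or error messages.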
30  vendor/github.com/moby/buildkit/source/git/redact_credentials_go114.go  (generated, vendored, new file)

@@ -0,0 +1,30 @@
+// +build !go1.15
+
+package git
+
+import "net/url"
+
+// redactCredentials takes a URL and redacts a password from it.
+// e.g. "https://user:password@github.com/user/private-repo-failure.git" will be changed to
+// "https://user:xxxxx@github.com/user/private-repo-failure.git"
+func redactCredentials(s string) string {
+	u, err := url.Parse(s)
+	if err != nil {
+		return s // string is not a URL, just return it
+	}
+
+	return urlRedacted(u)
+}
+
+// urlRedacted comes from go's url.Redacted() which isn't available on go < 1.15
+func urlRedacted(u *url.URL) string {
+	if u == nil {
+		return ""
+	}
+
+	ru := *u
+	if _, has := ru.User.Password(); has {
+		ru.User = url.UserPassword(ru.User.Username(), "xxxxx")
+	}
+	return ru.String()
+}
12  vendor/github.com/moby/buildkit/source/gitidentifier.go  (generated, vendored)

@@ -2,10 +2,10 @@ package source

 import (
 	"net/url"
+	"path"
 	"strings"

 	"github.com/moby/buildkit/util/sshutil"
 	"github.com/pkg/errors"
 )

 type GitIdentifier struct {

@@ -27,8 +27,8 @@ func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) {
 	}

 	var fragment string
-	if strings.HasPrefix(remoteURL, "git@") {
-		// git@.. is not an URL, so cannot be parsed as URL
+	if sshutil.IsImplicitSSHTransport(remoteURL) {
+		// implicit ssh urls such as "git@.." are not actually a URL, so cannot be parsed as URL
 		parts := strings.SplitN(remoteURL, "#", 2)

 		repo.Remote = parts[0]

@@ -46,8 +46,8 @@ func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) {
 		u.Fragment = ""
 		repo.Remote = u.String()
 	}
-	if repo.Subdir != "" {
-		return nil, errors.Errorf("subdir not supported yet")
+	if sd := path.Clean(repo.Subdir); sd == "/" || sd == "." {
+		repo.Subdir = ""
 	}
 	return &repo, nil
 }

@@ -59,7 +59,7 @@ func (i *GitIdentifier) ID() string {
 // isGitTransport returns true if the provided str is a git transport by inspecting
 // the prefix of the string for known protocols used in git.
 func isGitTransport(str string) bool {
-	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || sshutil.IsSSHTransport(str)
+	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "ssh://") || sshutil.IsImplicitSSHTransport(str)
 }

 func getRefAndSubdir(fragment string) (ref string, subdir string) {

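To see what the widened transport check buys, here is an illustrative in-package test sketch (inputs hypothetical; isGitTransport is unexported, so this only works inside package source):

	package source

	import "testing"

	// Illustrative only: exercises the updated transport matcher.
	func TestIsGitTransportSketch(t *testing.T) {
		cases := map[string]bool{
			"https://github.com/moby/buildkit.git":   true,  // https:// prefix
			"ssh://git@github.com/moby/buildkit.git": true,  // explicit ssh://, newly accepted
			"git@github.com:moby/buildkit.git":       true,  // implicit SSH form
			"/home/user/repo.git":                    false, // treated as a local path
		}
		for in, want := range cases {
			if got := isGitTransport(in); got != want {
				t.Errorf("isGitTransport(%q) = %v, want %v", in, got, want)
			}
		}
	}
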
7  vendor/github.com/moby/buildkit/util/apicaps/caps.go  (generated, vendored)

@@ -122,6 +122,13 @@ func (s *CapSet) Supports(id CapID) error {
 	return nil
 }

+// Contains checks if cap set contains cap. Note that unlike Supports() this
+// function only checks capability existence in remote set, not if cap has been initialized.
+func (s *CapSet) Contains(id CapID) bool {
+	_, ok := s.set[string(id)]
+	return ok
+}
+
 // CapError is an error for unsupported capability
 type CapError struct {
 	ID CapID

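A minimal sketch of the distinction (capability ID hypothetical): Supports returns a descriptive error when a capability is missing or disabled, while the new Contains only reports whether the remote side advertised it at all.

	package main

	import (
		"fmt"

		"github.com/moby/buildkit/util/apicaps"
	)

	func main() {
		var caps apicaps.CapSet // in practice, built from the remote's advertised capabilities
		id := apicaps.CapID("example.capability") // hypothetical ID

		if err := caps.Supports(id); err != nil {
			fmt.Println("not usable:", err) // missing, or present but not enabled
		}
		if caps.Contains(id) {
			fmt.Println("advertised by the remote (initialization not checked)")
		}
	}
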
5  vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go  (generated, vendored)

@@ -465,10 +465,7 @@ func (m *APICap) Unmarshal(dAtA []byte) error {
 			if err != nil {
 				return err
 			}
-			if skippy < 0 {
-				return ErrInvalidLengthCaps
-			}
-			if (iNdEx + skippy) < 0 {
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
 				return ErrInvalidLengthCaps
 			}
 			if (iNdEx + skippy) > l {

22  vendor/github.com/moby/buildkit/util/archutil/detect.go  (generated, vendored)

@@ -38,6 +38,12 @@ func SupportedPlatforms(noCache bool) []string {
 	if p := "linux/386"; def != p && i386Supported() == nil {
 		arr = append(arr, p)
 	}
+	if p := "linux/mips64le"; def != p && mips64leSupported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/mips64"; def != p && mips64Supported() == nil {
+		arr = append(arr, p)
+	}
 	if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
 		arr = append(arr, "linux/arm/v7", "linux/arm/v6")
 	} else if def == "linux/arm/v7" {

@@ -66,6 +72,12 @@ func Check(pp specs.Platform) bool {
 	if p == "linux/386" && i386Supported() == nil {
 		return true
 	}
+	if p == "linux/mips64le" && mips64leSupported() == nil {
+		return true
+	}
+	if p == "linux/mips64" && mips64Supported() == nil {
+		return true
+	}
 	if !strings.HasPrefix(p, "linux/arm/") && armSupported() == nil {
 		return true
 	}

@@ -110,6 +122,16 @@ func WarnIfUnsupported(pfs []string) {
 			printPlatformWarning(p, err)
 		}
 	}
+	if p == "linux/mips64le" {
+		if err := mips64leSupported(); err != nil {
+			printPlatformWarning(p, err)
+		}
+	}
+	if p == "linux/mips64" {
+		if err := mips64Supported(); err != nil {
+			printPlatformWarning(p, err)
+		}
+	}
 	if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
 		if err := armSupported(); err != nil {
 			printPlatformWarning(p, err)

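A hedged sketch of how a daemon might consume this detection (using only the SupportedPlatforms signature shown above):

	package main

	import (
		"fmt"

		"github.com/moby/buildkit/util/archutil"
	)

	func main() {
		// Platforms the host can execute, natively or via binfmt_misc emulation;
		// linux/mips64 and linux/mips64le are now probed like the other architectures.
		for _, p := range archutil.SupportedPlatforms(false) {
			fmt.Println("worker platform:", p)
		}
	}
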
8  vendor/github.com/moby/buildkit/util/archutil/mips64_binary.go  (generated, vendored, new file)

@@ -0,0 +1,8 @@
+// +build !mips64
+
+package archutil
+
+// This file is generated by running make inside the archutil package.
+// Do not edit manually.
+
+const Binarymips64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xac\x92\xb1\x4a\xf4\x40\x14\x85\xbf\x99\xcd\xe6\xff\x41\xc5\x05\x2d\xc4\x2a\xc5\x16\x8b\xc5\x60\x69\x25\xb1\x50\x10\x14\x56\x7c\x82\x04\xd7\x18\x90\x24\x6c\x26\x60\x27\xbe\x81\xcf\xe4\xbb\x08\xbe\x80\xf5\xca\x24\x93\x98\x44\xa6\x10\x3c\x45\xce\xdc\x93\x7b\x72\xc3\x9c\xfb\x7c\x7e\x75\x21\xa5\xa0\x83\xe4\x3f\x60\x04\x11\x20\x8e\xad\x1a\x36\x24\x5e\x83\xa6\x3a\x61\x42\x88\xcf\xb4\x80\x09\xe0\xd9\xbe\x8f\xc6\x37\x60\x83\x83\x11\xb7\x33\xa6\xdf\x83\xeb\xfe\x3e\x9b\x63\x38\x60\x01\x77\xfa\xfd\x06\xf0\xf9\x1d\x66\xe6\x37\xa5\x44\xf4\xe7\xd5\x8f\x45\x57\xef\x3b\xbc\x22\x10\x2f\xed\x3d\x30\xf7\x60\x2e\xf7\xde\x80\x6d\x53\x9f\x01\xbb\x49\x56\xd5\x5f\xfb\xe7\x09\x54\xf9\x50\xea\xb5\x8e\x62\xd4\xf5\xe5\xf2\x56\x45\x71\x7a\xff\x18\x25\xa5\x2d\xf3\x42\xa7\x79\x56\xa2\xf4\xea\x49\xa3\x92\xac\x52\x91\xd6\xeb\x34\xae\xf4\xaa\xe4\x4f\xb0\x55\xc0\x51\x97\xe6\x30\x87\x71\x1e\xf4\xf2\x68\xf5\xc3\x02\x76\xac\x68\xfd\xa2\x7f\x65\x06\x0b\x87\x5f\xd8\x77\x6d\xa3\x3f\xdc\xa3\x6e\x9f\x66\x3f\xf3\xe9\xa0\xf2\xcd\xe6\xd3\x11\x45\xe8\xf0\x8f\x73\x9d\x38\xfc\x4b\x7b\x38\x75\xf9\xbf\x02\x00\x00\xff\xff\x61\x89\x8d\x22\x10\x03\x00\x00"
7  vendor/github.com/moby/buildkit/util/archutil/mips64_check.go  (generated, vendored, new file)

@@ -0,0 +1,7 @@
+// +build !mips64
+
+package archutil
+
+func mips64Supported() error {
+	return check(Binarymips64)
+}
7  vendor/github.com/moby/buildkit/util/archutil/mips64_check_mips64.go  (generated, vendored, new file)

@@ -0,0 +1,7 @@
+// +build mips64
+
+package archutil
+
+func mips64Supported() error {
+	return nil
+}
8  vendor/github.com/moby/buildkit/util/archutil/mips64le_binary.go  (generated, vendored, new file)

@@ -0,0 +1,8 @@
+// +build !mips64le
+
+package archutil
+
+// This file is generated by running make inside the archutil package.
+// Do not edit manually.
+
+const Binarymips64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xa4\x92\xb1\x4a\xf4\x40\x14\x85\xbf\x99\xec\xee\xbf\xf0\x2b\x0a\x5a\x88\x55\x8a\x2d\x82\xc5\x90\xd2\x4a\xc6\x42\x41\x50\x58\xf1\x09\x12\x5c\x63\x40\x92\xb0\x99\x80\x9d\xf8\x06\x3e\x93\xef\x22\xec\x0b\x58\xaf\x84\xcc\xac\x26\x26\x28\x78\x9b\x73\xbf\xdc\x73\x12\xce\x9c\xbb\xcf\x67\xe7\x52\x0a\x2e\x25\xff\x01\x46\x10\x01\xe2\xd8\xaa\x61\x43\xe2\x35\x68\xaa\x13\x26\x84\xf8\x4c\x0b\x98\x00\x9e\xed\xfb\x68\x7c\x03\x36\x38\x18\x71\x3b\x63\xfa\x3d\xb8\xee\xef\xb3\x39\x86\x03\x16\x70\xa7\xdf\x6f\x00\x9f\xdf\x61\x66\x7e\x53\x4a\x44\x7f\x5e\xfd\x58\x74\xf5\xbe\xc3\x2b\x02\xf1\xd2\xde\x03\x73\x0f\xe6\x72\xef\x0d\xd8\x36\xf5\x19\xb0\x9b\x64\x55\xfd\xb5\x7f\x9e\x40\x95\x0f\xa5\x5e\xeb\x28\x46\x5d\x5f\x2e\x6f\x55\x14\xa7\xf7\x8f\x51\x52\xda\x32\x2f\xf3\x42\xa7\x79\x56\xa2\xf4\xea\x49\xa3\x92\xac\x52\x91\xd6\xeb\x34\xae\xf4\xaa\xe4\x4f\xb0\x55\xc0\x51\x97\xe6\x30\x87\x71\x1e\xf4\xf2\x68\xf5\xc3\x02\x76\xac\x68\xfd\xa2\x7f\x65\x06\x0b\x87\x5f\xd8\x77\x6d\xa3\x3f\xdc\xa3\x6e\x9f\x66\x3f\xf3\xe9\xa0\xf2\xcd\xe6\xd3\x11\x45\xe8\xf0\x8f\x73\x9d\x38\xfc\x4b\x7b\x38\x75\xf9\xbf\x02\x00\x00\xff\xff\xd1\x67\xf7\xcd\x10\x03\x00\x00"
7  vendor/github.com/moby/buildkit/util/archutil/mips64le_check.go  (generated, vendored, new file)

@@ -0,0 +1,7 @@
+// +build !mips64le
+
+package archutil
+
+func mips64leSupported() error {
+	return check(Binarymips64le)
+}
7  vendor/github.com/moby/buildkit/util/archutil/mips64le_check_mips64le.go  (generated, vendored, new file)

@@ -0,0 +1,7 @@
+// +build mips64le
+
+package archutil
+
+func mips64leSupported() error {
+	return nil
+}
13  vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go  (generated, vendored)

@@ -57,13 +57,12 @@ func WithInsecureSpec() oci.SpecOpts {
 		"CAP_NET_ADMIN",
 		"CAP_NET_BROADCAST",
 	}
-	for _, cap := range addCaps {
-		s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap)
-		s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap)
-		s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap)
-		s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap)
-		s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap)
-	}
+	s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, addCaps...)
+	s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, addCaps...)
+	s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, addCaps...)
+	s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, addCaps...)
+	s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, addCaps...)
+
 	s.Linux.ReadonlyPaths = []string{}
 	s.Linux.MaskedPaths = []string{}
 	s.Process.ApparmorProfile = ""

46  vendor/github.com/moby/buildkit/util/gitutil/git_protocol.go  (generated, vendored, new file)

@@ -0,0 +1,46 @@
+package gitutil
+
+import (
+	"strings"
+
+	"github.com/moby/buildkit/util/sshutil"
+)
+
+const (
+	HTTPProtocol = iota + 1
+	HTTPSProtocol
+	SSHProtocol
+	GitProtocol
+	UnknownProtocol
+)
+
+// ParseProtocol parses a git URL and returns the remote url and protocol type
+func ParseProtocol(remote string) (string, int) {
+	prefixes := map[string]int{
+		"http://":  HTTPProtocol,
+		"https://": HTTPSProtocol,
+		"git://":   GitProtocol,
+		"ssh://":   SSHProtocol,
+	}
+	protocolType := UnknownProtocol
+	for prefix, potentialType := range prefixes {
+		if strings.HasPrefix(remote, prefix) {
+			remote = strings.TrimPrefix(remote, prefix)
+			protocolType = potentialType
+		}
+	}
+
+	if protocolType == UnknownProtocol && sshutil.IsImplicitSSHTransport(remote) {
+		protocolType = SSHProtocol
+	}
+
+	// remove name from ssh
+	if protocolType == SSHProtocol {
+		parts := strings.SplitN(remote, "@", 2)
+		if len(parts) == 2 {
+			remote = parts[1]
+		}
+	}
+
+	return remote, protocolType
+}
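
A usage sketch with hypothetical remotes, tracing the logic above (prefix stripped, implicit SSH detected, user name removed for SSH):

	package main

	import (
		"fmt"

		"github.com/moby/buildkit/util/gitutil"
	)

	func main() {
		remote, proto := gitutil.ParseProtocol("git@github.com:moby/buildkit.git")
		fmt.Println(remote, proto == gitutil.SSHProtocol) // "github.com:moby/buildkit.git" true

		remote, proto = gitutil.ParseProtocol("https://github.com/moby/buildkit.git")
		fmt.Println(remote, proto == gitutil.HTTPSProtocol) // "github.com/moby/buildkit.git" true
	}
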
13  vendor/github.com/moby/buildkit/util/progress/logs/logs.go  (generated, vendored)

@@ -17,8 +17,8 @@ import (
 	"github.com/tonistiigi/units"
 )

-var defaultMaxLogSize = 1024 * 1024
-var defaultMaxLogSpeed = 100 * 1024 // per second
+var defaultMaxLogSize = 2 * 1024 * 1024
+var defaultMaxLogSpeed = 200 * 1024 // per second

 const (
 	stdout = 1

@@ -71,15 +71,16 @@ func (sw *streamWriter) checkLimit(n int) int {
 		maxSize = int(math.Ceil(time.Since(sw.created).Seconds())) * defaultMaxLogSpeed
 		sw.clipReasonSpeed = true
 	}
-	if maxSize > defaultMaxLogSize {
+	if maxSize == -1 || maxSize > defaultMaxLogSize {
 		maxSize = defaultMaxLogSize
 		sw.clipReasonSpeed = false
 	}
-	if maxSize < oldSize {
-		return 0
-	}

-	if sw.size > maxSize {
+	if maxSize != -1 {
+		if maxSize < oldSize {
+			return 0
+		}
+
+		if sw.size > maxSize {
 			return maxSize - oldSize
 		}

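A worked example of the new limits, assuming the defaults above are in effect: a step that has been running for 4 seconds may emit at most 4 × 200 KiB = 800 KiB before being clipped for speed, and never more than 2 MiB in total; both ceilings are double the previous 100 KiB/s and 1 MiB. The added -1 branches appear to let an "unlimited" setting (when these defaults are overridden to -1) bypass the size check entirely rather than clipping at zero.
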
17  vendor/github.com/moby/buildkit/util/resolver/authorizer.go  (generated, vendored)

@@ -40,7 +40,7 @@ func newAuthHandlerNS(sm *session.Manager) *authHandlerNS {
 	}
 }

-func (a *authHandlerNS) get(host string, sm *session.Manager, g session.Group) *authHandler {
+func (a *authHandlerNS) get(ctx context.Context, host string, sm *session.Manager, g session.Group) *authHandler {
 	if g != nil {
 		if iter := g.SessionIterator(); iter != nil {
 			for {

@@ -65,7 +65,7 @@ func (a *authHandlerNS) get(host string, sm *session.Manager, g session.Group) *
 		}
 		if parts[0] == host {
 			if h.authority != nil {
-				session, ok, err := sessionauth.VerifyTokenAuthority(host, h.authority, sm, g)
+				session, ok, err := sessionauth.VerifyTokenAuthority(ctx, host, h.authority, sm, g)
 				if err == nil && ok {
 					a.handlers[host+"/"+session] = h
 					h.lastUsed = time.Now()

@@ -122,7 +122,7 @@ func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) err
 	defer a.handlers.mu.Unlock()

 	// skip if there is no auth handler
-	ah := a.handlers.get(req.URL.Host, a.sm, a.session)
+	ah := a.handlers.get(ctx, req.URL.Host, a.sm, a.session)
 	if ah == nil {
 		return nil
 	}

@@ -147,7 +147,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 	last := responses[len(responses)-1]
 	host := last.Request.URL.Host

-	handler := a.handlers.get(host, a.sm, a.session)
+	handler := a.handlers.get(ctx, host, a.sm, a.session)

 	for _, c := range auth.ParseAuthHeader(last.Header) {
 		if c.Scheme == auth.BearerAuth {

@@ -177,7 +177,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 	}

 	var username, secret string
-	session, pubKey, err := sessionauth.GetTokenAuthority(host, a.sm, a.session)
+	session, pubKey, err := sessionauth.GetTokenAuthority(ctx, host, a.sm, a.session)
 	if err != nil {
 		return err
 	}

@@ -339,7 +339,7 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se
 	}()

 	if ah.authority != nil {
-		resp, err := sessionauth.FetchToken(&sessionauth.FetchTokenRequest{
+		resp, err := sessionauth.FetchToken(ctx, &sessionauth.FetchTokenRequest{
 			ClientID: "buildkit-client",
 			Host:     ah.host,
 			Realm:    to.Realm,

@@ -400,6 +400,11 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se
 }

 func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
+	lastResponse := responses[len(responses)-1]
+	if lastResponse.StatusCode == http.StatusUnauthorized {
+		return errors.Wrapf(docker.ErrInvalidAuthorization, "authorization status: %v", lastResponse.StatusCode)
+	}
+
 	errStr := c.Parameters["error"]
 	if errStr == "" {
 		return nil

5  vendor/github.com/moby/buildkit/util/resolver/pool.go  (generated, vendored)

@@ -105,7 +105,7 @@ func newResolver(hosts docker.RegistryHosts, handler *authHandlerNS, sm *session
 		handler: handler,
 	}
 	r.Resolver = docker.NewResolver(docker.ResolverOptions{
-		Hosts: r.hostsFunc,
+		Hosts: r.HostsFunc,
 	})
 	return r
 }

@@ -123,7 +123,8 @@ type Resolver struct {
 	mode source.ResolveMode
 }

-func (r *Resolver) hostsFunc(host string) ([]docker.RegistryHost, error) {
+// HostsFunc implements registry configuration of this Resolver
+func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) {
 	return func(domain string) ([]docker.RegistryHost, error) {
 		v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) (interface{}, error) {
 			// long lock not needed because flightcontrol.Do

23  vendor/github.com/moby/buildkit/util/resolver/resolver.go  (generated, vendored)

@@ -13,12 +13,11 @@ import (
 	"time"

 	"github.com/containerd/containerd/remotes/docker"
-	"github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/pkg/errors"
 )

-func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) {
+func fillInsecureOpts(host string, c RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) {
 	var hosts []docker.RegistryHost

 	tc, err := loadTLSConfig(c)

@@ -65,7 +64,7 @@ func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHos
 	return hosts, nil
 }

-func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) {
+func loadTLSConfig(c RegistryConfig) (*tls.Config, error) {
 	for _, d := range c.TLSConfigDir {
 		fs, err := ioutil.ReadDir(d)
 		if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {

@@ -76,7 +75,7 @@ func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) {
 			c.RootCAs = append(c.RootCAs, filepath.Join(d, f.Name()))
 		}
 		if strings.HasSuffix(f.Name(), ".cert") {
-			c.KeyPairs = append(c.KeyPairs, config.TLSKeyPair{
+			c.KeyPairs = append(c.KeyPairs, TLSKeyPair{
 				Certificate: filepath.Join(d, f.Name()),
 				Key:         filepath.Join(d, strings.TrimSuffix(f.Name(), ".cert")+".key"),
 			})

@@ -115,8 +114,22 @@ func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) {
 	return tc, nil
 }

+type RegistryConfig struct {
+	Mirrors      []string     `toml:"mirrors"`
+	PlainHTTP    *bool        `toml:"http"`
+	Insecure     *bool        `toml:"insecure"`
+	RootCAs      []string     `toml:"ca"`
+	KeyPairs     []TLSKeyPair `toml:"keypair"`
+	TLSConfigDir []string     `toml:"tlsconfigdir"`
+}
+
+type TLSKeyPair struct {
+	Key         string `toml:"key"`
+	Certificate string `toml:"cert"`
+}
+
 // NewRegistryConfig converts registry config to docker.RegistryHosts callback
-func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts {
+func NewRegistryConfig(m map[string]RegistryConfig) docker.RegistryHosts {
 	return docker.Registries(
 		func(host string) ([]docker.RegistryHost, error) {
 			c, ok := m[host]

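With RegistryConfig now defined in the resolver package itself (instead of buildkitd's config package), callers outside the daemon can build the RegistryHosts callback directly. A minimal sketch, host name hypothetical:

	package main

	import (
		"github.com/moby/buildkit/util/resolver"
	)

	func main() {
		plainHTTP := true
		hosts := resolver.NewRegistryConfig(map[string]resolver.RegistryConfig{
			"registry.example.com:5000": {
				PlainHTTP: &plainHTTP, // allow http:// for this registry
				Mirrors:   []string{"mirror.example.com"},
			},
		})
		_ = hosts // satisfies docker.RegistryHosts
	}
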
5  vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go  (generated, vendored)

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"net"
 	"strings"
 	"syscall"
 	"time"

@@ -59,6 +60,10 @@ func retryError(err error) bool {
 	if errors.Is(err, io.EOF) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.EPIPE) {
 		return true
 	}
+	// catches TLS timeout or other network-related temporary errors
+	if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() {
+		return true
+	}
 	// https://github.com/containerd/containerd/pull/4724
 	if errors.Cause(err).Error() == "no response" {
 		return true

2  vendor/github.com/moby/buildkit/util/sshutil/transport_validation.go  (generated, vendored)

@@ -6,6 +6,6 @@ import (

 var gitSSHRegex = regexp.MustCompile("^[a-zA-Z0-9-_]+@[a-zA-Z0-9-.]+:.*$")

-func IsSSHTransport(s string) bool {
+func IsImplicitSSHTransport(s string) bool {
 	return gitSSHRegex.MatchString(s)
 }

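The rename clarifies that the regex only recognizes the implicit SCP-style form, not explicit ssh:// URLs. A small sketch with hypothetical addresses:

	package main

	import (
		"fmt"

		"github.com/moby/buildkit/util/sshutil"
	)

	func main() {
		// SCP-style "user@host:path" is matched by the regex above.
		fmt.Println(sshutil.IsImplicitSSHTransport("git@github.com:moby/buildkit.git")) // true
		// Explicit ssh:// URLs are not "implicit"; callers check the prefix separately.
		fmt.Println(sshutil.IsImplicitSSHTransport("ssh://git@github.com/moby/buildkit.git")) // false
		fmt.Println(sshutil.IsImplicitSSHTransport("https://github.com/moby/buildkit.git")) // false
	}
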
4  vendor/github.com/moby/buildkit/util/stack/stack.pb.go  (generated, vendored)

@@ -151,7 +151,9 @@ func init() {
 	proto.RegisterType((*Frame)(nil), "stack.Frame")
 }

-func init() { proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) }
+func init() {
+	proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a)
+}

 var fileDescriptor_b44c07feb2ca0a5a = []byte{
 	// 185 bytes of a gzipped FileDescriptorProto

10  vendor/github.com/moby/buildkit/worker/worker.go  (generated, vendored)

@@ -46,8 +46,10 @@ type Infos interface {

 // Pre-defined label keys
 const (
-	labelPrefix      = "org.mobyproject.buildkit.worker."
-	LabelExecutor    = labelPrefix + "executor"    // "oci" or "containerd"
-	LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...)
-	LabelHostname    = labelPrefix + "hostname"
+	labelPrefix              = "org.mobyproject.buildkit.worker."
+	LabelExecutor            = labelPrefix + "executor"    // "oci" or "containerd"
+	LabelSnapshotter         = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...)
+	LabelHostname            = labelPrefix + "hostname"
+	LabelContainerdUUID      = labelPrefix + "containerd.uuid"      // containerd worker: containerd UUID
+	LabelContainerdNamespace = labelPrefix + "containerd.namespace" // containerd worker: containerd namespace
 )

44  vendor/github.com/moby/buildkit/worker/workercontroller.go  (generated, vendored)

@@ -1,8 +1,6 @@
 package worker

 import (
-	"sync"
-
 	"github.com/containerd/containerd/filters"
 	"github.com/moby/buildkit/client"
 	"github.com/pkg/errors"

@@ -12,16 +10,15 @@ import (
 // Currently, only local workers are supported.
 type Controller struct {
 	// TODO: define worker interface and support remote ones
-	workers   sync.Map
-	defaultID string
+	workers []Worker
 }

-// Add adds a local worker
+// Add adds a local worker.
+// The first worker becomes the default.
+//
+// Add is not thread-safe.
 func (c *Controller) Add(w Worker) error {
-	c.workers.Store(w.ID(), w)
-	if c.defaultID == "" {
-		c.defaultID = w.ID()
-	}
+	c.workers = append(c.workers, w)
 	return nil
 }

@@ -32,41 +29,38 @@ func (c *Controller) List(filterStrings ...string) ([]Worker, error) {
 		return nil, err
 	}
 	var workers []Worker
-	c.workers.Range(func(k, v interface{}) bool {
-		w := v.(Worker)
+	for _, w := range c.workers {
 		if filter.Match(adaptWorker(w)) {
 			workers = append(workers, w)
 		}
-		return true
-	})
+	}
 	return workers, nil
 }

 // GetDefault returns the default local worker
 func (c *Controller) GetDefault() (Worker, error) {
-	if c.defaultID == "" {
+	if len(c.workers) == 0 {
 		return nil, errors.Errorf("no default worker")
 	}
-	return c.Get(c.defaultID)
+	return c.workers[0], nil
 }

 func (c *Controller) Get(id string) (Worker, error) {
-	v, ok := c.workers.Load(id)
-	if !ok {
-		return nil, errors.Errorf("worker %s not found", id)
+	for _, w := range c.workers {
+		if w.ID() == id {
+			return w, nil
+		}
 	}
-	return v.(Worker), nil
+	return nil, errors.Errorf("worker %s not found", id)
 }

-// TODO: add Get(Constraint) (*Worker, error)
-
 // WorkerInfos returns slice of WorkerInfo.
 // The first item is the default worker.
 func (c *Controller) WorkerInfos() []client.WorkerInfo {
-	workers, err := c.List()
-	if err != nil {
-		return nil
-	}
-	out := make([]client.WorkerInfo, 0, len(workers))
-	for _, w := range workers {
+	out := make([]client.WorkerInfo, 0, len(c.workers))
+	for _, w := range c.workers {
 		out = append(out, client.WorkerInfo{
 			ID:     w.ID(),
 			Labels: w.Labels(),

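The Controller moves from a sync.Map keyed by ID plus a separate defaultID to a plain ordered slice: the first worker added is the default, and lookups are linear scans (fine for the handful of local workers this supports). A usage sketch, assuming some concrete `w` implementing worker.Worker:

	// Hypothetical wiring inside a daemon setup function.
	wc := &worker.Controller{}
	if err := wc.Add(w); err != nil { // append-only; the first Add fixes the default
		return err
	}
	def, _ := wc.GetDefault() // simply wc.workers[0]; no defaultID bookkeeping
	infos := wc.WorkerInfos() // ordered, default worker first
	_, _ = def, infos

Note the documented trade-off: Add is no longer thread-safe, so all workers must be registered before concurrent use.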
225  vendor/github.com/tonistiigi/fsutil/copy/copy.go  (generated, vendored)

@@ -11,6 +11,7 @@ import (
 	"time"

 	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/fileutils"
 	"github.com/pkg/errors"
 )

@@ -86,7 +87,10 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
 		return err
 	}

-	c := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler)
+	c, err := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler, ci.IncludePatterns, ci.ExcludePatterns)
+	if err != nil {
+		return err
+	}
 	srcs := []string{src}

 	if ci.AllowWildcards {

@@ -109,7 +113,8 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
 		if err != nil {
 			return err
 		}
-		if err := c.copy(ctx, srcFollowed, dst, false); err != nil {
+		skipIncludePatterns := c.includePatternMatcher == nil
+		if err := c.copy(ctx, srcFollowed, "", dst, false, skipIncludePatterns); err != nil {
 			return err
 		}
 	}

@@ -162,6 +167,10 @@ type CopyInfo struct {
 	XAttrErrorHandler XAttrErrorHandler
 	CopyDirContents   bool
 	FollowLinks       bool
+	// Include only files/dirs matching at least one of these patterns
+	IncludePatterns []string
+	// Exclude files/dir matching any of these patterns (even if they match an include pattern)
+	ExcludePatterns []string
 }

 type Opt func(*CopyInfo)

@@ -197,36 +206,112 @@ func AllowXAttrErrors(ci *CopyInfo) {
 	WithXAttrErrorHandler(h)(ci)
 }

-type copier struct {
-	chown             Chowner
-	utime             *time.Time
-	mode              *int
-	inodes            map[uint64]string
-	xattrErrorHandler XAttrErrorHandler
+func WithIncludePattern(includePattern string) Opt {
+	return func(ci *CopyInfo) {
+		ci.IncludePatterns = append(ci.IncludePatterns, includePattern)
+	}
 }

-func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier {
+func WithExcludePattern(excludePattern string) Opt {
+	return func(ci *CopyInfo) {
+		ci.ExcludePatterns = append(ci.ExcludePatterns, excludePattern)
+	}
+}
+
+type copier struct {
+	chown                 Chowner
+	utime                 *time.Time
+	mode                  *int
+	inodes                map[uint64]string
+	xattrErrorHandler     XAttrErrorHandler
+	includePatternMatcher *fileutils.PatternMatcher
+	excludePatternMatcher *fileutils.PatternMatcher
+	parentDirs            []parentDir
+}
+
+type parentDir struct {
+	srcPath string
+	dstPath string
+	copied  bool
+}
+
+func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler, includePatterns, excludePatterns []string) (*copier, error) {
 	if xeh == nil {
 		xeh = func(dst, src, key string, err error) error {
 			return err
 		}
 	}
-	return &copier{inodes: map[uint64]string{}, chown: chown, utime: tm, xattrErrorHandler: xeh, mode: mode}
+
+	var includePatternMatcher *fileutils.PatternMatcher
+	if len(includePatterns) != 0 {
+		var err error
+		includePatternMatcher, err = fileutils.NewPatternMatcher(includePatterns)
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid includepatterns: %s", includePatterns)
+		}
+	}
+
+	var excludePatternMatcher *fileutils.PatternMatcher
+	if len(excludePatterns) != 0 {
+		var err error
+		excludePatternMatcher, err = fileutils.NewPatternMatcher(excludePatterns)
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid excludepatterns: %s", excludePatterns)
+		}
+	}
+
+	return &copier{
+		inodes:                map[uint64]string{},
+		chown:                 chown,
+		utime:                 tm,
+		xattrErrorHandler:     xeh,
+		mode:                  mode,
+		includePatternMatcher: includePatternMatcher,
+		excludePatternMatcher: excludePatternMatcher,
+	}, nil
 }

 // dest is always clean
-func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMetadata bool) error {
+func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata, skipIncludePatterns bool) error {
 	select {
 	case <-ctx.Done():
 		return ctx.Err()
 	default:
 	}

 	fi, err := os.Lstat(src)
 	if err != nil {
 		return errors.Wrapf(err, "failed to stat %s", src)
 	}

+	include := true
+	if srcComponents != "" {
+		if !skipIncludePatterns {
+			include, err = c.include(srcComponents, fi)
+			if err != nil {
+				return err
+			}
+		}
+		exclude, err := c.exclude(srcComponents, fi)
+		if err != nil {
+			return err
+		}
+		if exclude {
+			include = false
+		}
+	}
+
+	if include {
+		if err := c.createParentDirs(src, srcComponents, target, overwriteTargetMetadata); err != nil {
+			return err
+		}
+	}
+
 	if !fi.IsDir() {
+		if !include {
+			return nil
+		}
+
 		if err := ensureEmptyFileTarget(target); err != nil {
 			return err
 		}

@@ -236,9 +321,9 @@ func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMe
 	switch {
 	case fi.IsDir():
-		if created, err := c.copyDirectory(ctx, src, target, fi, overwriteTargetMetadata); err != nil {
+		if created, err := c.copyDirectory(ctx, src, srcComponents, target, fi, overwriteTargetMetadata, skipIncludePatterns, include); err != nil {
 			return err
-		} else if !overwriteTargetMetadata {
+		} else if !overwriteTargetMetadata || !skipIncludePatterns {
 			copyFileInfo = created
 		}
 	case (fi.Mode() & os.ModeType) == 0:

@@ -282,36 +367,101 @@ func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMe
 	return nil
 }

-func (c *copier) copyDirectory(ctx context.Context, src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
+func (c *copier) include(path string, fi os.FileInfo) (bool, error) {
+	if c.includePatternMatcher == nil {
+		return false, nil
+	}
+
+	m, err := c.includePatternMatcher.Matches(path)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to match includepatterns")
+	}
+	return m, nil
+}
+
+func (c *copier) exclude(path string, fi os.FileInfo) (bool, error) {
+	if c.excludePatternMatcher == nil {
+		return false, nil
+	}
+
+	m, err := c.excludePatternMatcher.Matches(path)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to match excludepatterns")
+	}
+	return m, nil
+}
+
+// Delayed creation of parent directories when a file or dir matches an include
+// pattern.
+func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTargetMetadata bool) error {
+	for i, parentDir := range c.parentDirs {
+		if parentDir.copied {
+			continue
+		}
+
+		fi, err := os.Stat(parentDir.srcPath)
+		if err != nil {
+			return errors.Wrapf(err, "failed to stat %s", src)
+		}
+		if !fi.IsDir() {
+			return errors.Errorf("%s is not a directory", parentDir.srcPath)
+		}
+
+		created, err := copyDirectoryOnly(parentDir.srcPath, parentDir.dstPath, fi, overwriteTargetMetadata)
+		if err != nil {
+			return err
+		}
+		if created {
+			if err := c.copyFileInfo(fi, parentDir.dstPath); err != nil {
+				return errors.Wrap(err, "failed to copy file info")
+			}
+
+			if err := copyXAttrs(parentDir.dstPath, parentDir.srcPath, c.xattrErrorHandler); err != nil {
+				return errors.Wrap(err, "failed to copy xattrs")
+			}
+		}
+
+		c.parentDirs[i].copied = true
+	}
+	return nil
+}
+
+func (c *copier) copyDirectory(ctx context.Context, src, srcComponents, dst string, stat os.FileInfo, overwriteTargetMetadata, skipIncludePatterns, matchedExactly bool) (bool, error) {
 	if !stat.IsDir() {
 		return false, errors.Errorf("source is not directory")
 	}

 	created := false

-	if st, err := os.Lstat(dst); err != nil {
-		if !os.IsNotExist(err) {
-			return false, err
-		}
-		created = true
-		if err := os.Mkdir(dst, stat.Mode()); err != nil {
-			return created, errors.Wrapf(err, "failed to mkdir %s", dst)
-		}
-	} else if !st.IsDir() {
-		return false, errors.Errorf("cannot copy to non-directory: %s", dst)
-	} else if overwriteTargetMetadata {
-		if err := os.Chmod(dst, stat.Mode()); err != nil {
-			return false, errors.Wrapf(err, "failed to chmod on %s", dst)
-		}
-	}
+	// If there are no include patterns or this directory matched an include
+	// pattern exactly, go ahead and create the directory. Otherwise, delay to
+	// handle include patterns like a/*/c where we do not want to create a/b
+	// until we encounter a/b/c.
+	if matchedExactly || skipIncludePatterns {
+		var err error
+		created, err = copyDirectoryOnly(src, dst, stat, overwriteTargetMetadata)
+		if err != nil {
+			return created, err
+		}
+	}
+
+	c.parentDirs = append(c.parentDirs, parentDir{
+		srcPath: src,
+		dstPath: dst,
+		copied:  skipIncludePatterns,
+	})
+
+	defer func() {
+		c.parentDirs = c.parentDirs[:len(c.parentDirs)-1]
+	}()

 	fis, err := ioutil.ReadDir(src)
 	if err != nil {
 		return false, errors.Wrapf(err, "failed to read %s", src)
 	}

 	for _, fi := range fis {
-		if err := c.copy(ctx, filepath.Join(src, fi.Name()), filepath.Join(dst, fi.Name()), true); err != nil {
+		if err := c.copy(ctx, filepath.Join(src, fi.Name()), filepath.Join(srcComponents, fi.Name()), filepath.Join(dst, fi.Name()), true, skipIncludePatterns); err != nil {
 			return false, err
 		}
 	}

@@ -319,6 +469,25 @@ func (c *copier) copyDirectory(ctx context.Context, src, dst string, stat os.Fil
 	return created, nil
 }

+func copyDirectoryOnly(src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
+	if st, err := os.Lstat(dst); err != nil {
+		if !os.IsNotExist(err) {
+			return false, err
+		}
+		if err := os.Mkdir(dst, stat.Mode()); err != nil {
+			return false, errors.Wrapf(err, "failed to mkdir %s", dst)
+		}
+		return true, nil
+	} else if !st.IsDir() {
+		return false, errors.Errorf("cannot copy to non-directory: %s", dst)
+	} else if overwriteTargetMetadata {
+		if err := os.Chmod(dst, stat.Mode()); err != nil {
+			return false, errors.Wrapf(err, "failed to chmod on %s", dst)
+		}
+	}
+	return false, nil
+}
+
 func ensureEmptyFileTarget(dst string) error {
 	fi, err := os.Lstat(dst)
 	if err != nil {

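A minimal sketch of the new include/exclude options from a caller's perspective (paths and patterns hypothetical; the import alias avoids shadowing the builtin copy):

	package main

	import (
		"context"
		"log"

		fscopy "github.com/tonistiigi/fsutil/copy"
	)

	func main() {
		// Copy /data/src into /data/dst, keeping only paths matching a/*/c
		// and dropping anything matching the exclude pattern, even when it
		// also matches an include pattern.
		err := fscopy.Copy(context.TODO(), "/data", "src", "/data", "dst",
			fscopy.WithIncludePattern("a/*/c"),
			fscopy.WithExcludePattern("a/*/c/secret"),
		)
		if err != nil {
			log.Fatal(err)
		}
	}

The delayed parentDirs mechanism above is what makes a pattern like a/*/c work: intermediate directories such as a/b are only materialized once some descendant actually matches.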
3  vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go  (generated, vendored)

@@ -89,7 +89,8 @@ func copyFileContent(dst, src *os.File) error {

 		n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, desired, 0)
 		if err != nil {
-			if (err != unix.ENOSYS && err != unix.EXDEV && err != unix.EPERM) || !first {
+			// matches go/src/internal/poll/copy_file_range_linux.go
+			if (err != unix.ENOSYS && err != unix.EXDEV && err != unix.EPERM && err != syscall.EIO && err != unix.EOPNOTSUPP && err != syscall.EINVAL) || !first {
 				return errors.Wrap(err, "copy file range failed")
 			}

Some files were not shown because too many files have changed in this diff.