vendor: add buildkit dependency

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi committed 2018-06-08 17:39:07 -07:00
parent 0c4ed4e4ae
commit 6fcb36ff14
173 changed files with 34973 additions and 166 deletions

@@ -27,10 +27,13 @@ github.com/imdario/mergo 0.2.1
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
# buildkit
github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
#get libnetwork packages
@@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0

@@ -0,0 +1,11 @@
# contrib
The `contrib` directory contains packages that do not belong in the core containerd packages but still contribute to overall containerd usability.
Packages such as AppArmor or SELinux are placed in `contrib` because they are platform-dependent and often require higher-level tools and profiles to work.
Packaging and other build tools can be added to `contrib` to aid in packaging containerd for various distributions.
## Testing
Code in the `contrib` directory may or may not have been tested in the normal test pipeline for core components.

@@ -0,0 +1,56 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package seccomp
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
"github.com/opencontainers/runtime-spec/specs-go"
)
// WithProfile receives the name of a file stored on disk containing a JSON-formatted
// seccomp profile, as specified by the opencontainers/runtime-spec.
// The profile is read from the file, unmarshaled, and set to the spec.
func WithProfile(profile string) oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = &specs.LinuxSeccomp{}
f, err := ioutil.ReadFile(profile)
if err != nil {
return fmt.Errorf("Cannot load seccomp profile %q: %v", profile, err)
}
if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil {
return fmt.Errorf("Decoding seccomp profile failed %q: %v", profile, err)
}
return nil
}
}
// WithDefaultProfile sets the default seccomp profile to the spec.
// Note: must follow the setting of process capabilities
func WithDefaultProfile() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = DefaultProfile(s)
return nil
}
}

@@ -0,0 +1,581 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package seccomp
import (
"runtime"
"syscall"
"github.com/opencontainers/runtime-spec/specs-go"
)
func arches() []specs.Arch {
switch runtime.GOARCH {
case "amd64":
return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32}
case "arm64":
return []specs.Arch{specs.ArchARM, specs.ArchAARCH64}
case "mips64":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mips64n32":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mipsel64":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "mipsel64n32":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "s390x":
return []specs.Arch{specs.ArchS390, specs.ArchS390X}
default:
return []specs.Arch{}
}
}
// DefaultProfile defines the whitelist for the default seccomp profile.
func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
syscalls := []specs.LinuxSyscall{
{
Names: []string{
"accept",
"accept4",
"access",
"alarm",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"_newselect",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0008,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0xffffffff,
Op: specs.OpEqualTo,
},
},
},
}
s := &specs.LinuxSeccomp{
DefaultAction: specs.ActErrno,
Architectures: arches(),
Syscalls: syscalls,
}
// include by arch
switch runtime.GOARCH {
case "arm", "arm64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arm_fadvise64_64",
"arm_sync_file_range",
"breakpoint",
"cacheflush",
"set_tls",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "amd64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arch_prctl",
"modify_ldt",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "386":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"modify_ldt",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"s390_pci_mmio_read",
"s390_pci_mmio_write",
"s390_runtime_instr",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
admin := false
for _, c := range sp.Process.Capabilities.Bounding {
switch c {
case "CAP_DAC_READ_SEARCH":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"open_by_handle_at"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_ADMIN":
admin = true
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"bpf",
"clone",
"fanotify_init",
"lookup_dcookie",
"mount",
"name_to_handle_at",
"perf_event_open",
"setdomainname",
"sethostname",
"setns",
"umount",
"umount2",
"unshare",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_BOOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"reboot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_CHROOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"chroot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_MODULE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"delete_module",
"init_module",
"finit_module",
"query_module",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PACCT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"acct"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PTRACE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"kcmp",
"process_vm_readv",
"process_vm_writev",
"ptrace",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_RAWIO":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"iopl",
"ioperm",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TIME":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"settimeofday",
"stime",
"adjtimex",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TTY_CONFIG":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"vhangup"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
}
if !admin {
switch runtime.GOARCH {
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 1,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
default:
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
}
}
return s
}

vendor/github.com/google/shlex/COPYING (generated, vendored; 202 lines added)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/google/shlex/README (generated, vendored; 2 lines added)

@@ -0,0 +1,2 @@
go-shlex is a simple lexer for go that supports shell-style quoting,
commenting, and escaping.

vendor/github.com/google/shlex/shlex.go (generated, vendored; 417 lines added)

@@ -0,0 +1,417 @@
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package shlex implements a simple lexer which splits input in to tokens using
shell-style rules for quoting and commenting.
The basic use case uses the default ASCII lexer to split a string into sub-strings:
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
To process a stream of strings:

	l := NewLexer(os.Stdin)
	for {
		token, err := l.Next()
		if err != nil {
			break
		}
		// process token
	}

To access the raw token stream (which includes tokens for comments):

	t := NewTokenizer(os.Stdin)
	for {
		token, err := t.Next()
		if err != nil {
			break
		}
		// process token
	}
*/
package shlex
import (
"bufio"
"fmt"
"io"
"strings"
)
// TokenType is a top-level token classification: A word, space, comment, unknown.
type TokenType int
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
type runeTokenClass int
// the internal state used by the lexer state machine
type lexerState int
// Token is a (type, value) pair representing a lexical token.
type Token struct {
tokenType TokenType
value string
}
// Equal reports whether tokens a and b are equal.
// Two tokens are equal if both their types and values are equal. A nil token can
// never be equal to another token.
func (a *Token) Equal(b *Token) bool {
if a == nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
// Named classes of UTF-8 runes
const (
spaceRunes = " \t\r\n"
escapingQuoteRunes = `"`
nonEscapingQuoteRunes = "'"
escapeRunes = `\`
commentRunes = "#"
)
// Classes of rune token
const (
unknownRuneClass runeTokenClass = iota
spaceRuneClass
escapingQuoteRuneClass
nonEscapingQuoteRuneClass
escapeRuneClass
commentRuneClass
eofRuneClass
)
// Classes of lexical token
const (
UnknownToken TokenType = iota
WordToken
SpaceToken
CommentToken
)
// Lexer state machine states
const (
startState lexerState = iota // no runes have been seen
inWordState // processing regular runes in a word
escapingState // we have just consumed an escape rune; the next rune is literal
escapingQuotedState // we have just consumed an escape rune within a quoted string
quotingEscapingState // we are within a quoted string that supports escaping ("...")
quotingState // we are within a string that does not support escaping ('...')
commentState // we are within a comment (everything following an unquoted or unescaped #
)
// tokenClassifier is used for classifying rune characters.
type tokenClassifier map[rune]runeTokenClass
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
for _, runeChar := range runes {
typeMap[runeChar] = tokenType
}
}
// newDefaultClassifier creates a new classifier for ASCII characters.
func newDefaultClassifier() tokenClassifier {
t := tokenClassifier{}
t.addRuneClass(spaceRunes, spaceRuneClass)
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
t.addRuneClass(escapeRunes, escapeRuneClass)
t.addRuneClass(commentRunes, commentRuneClass)
return t
}
// ClassifyRune classifies a rune
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
return t[runeVal]
}
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
type Lexer Tokenizer
// NewLexer creates a new lexer from an input stream.
func NewLexer(r io.Reader) *Lexer {
return (*Lexer)(NewTokenizer(r))
}
// Next returns the next word, or an error. If there are no more words,
// the error will be io.EOF.
func (l *Lexer) Next() (string, error) {
for {
token, err := (*Tokenizer)(l).Next()
if err != nil {
return "", err
}
switch token.tokenType {
case WordToken:
return token.value, nil
case CommentToken:
// skip comments
default:
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
}
}
}
// Tokenizer turns an input stream into a sequence of typed tokens
type Tokenizer struct {
input bufio.Reader
classifier tokenClassifier
}
// NewTokenizer creates a new tokenizer from an input stream.
func NewTokenizer(r io.Reader) *Tokenizer {
input := bufio.NewReader(r)
classifier := newDefaultClassifier()
return &Tokenizer{
input: *input,
classifier: classifier}
}
// scanStream scans the stream for the next token using the internal state machine.
// It will panic if it encounters a rune which it does not know how to handle.
func (t *Tokenizer) scanStream() (*Token, error) {
state := startState
var tokenType TokenType
var value []rune
var nextRune rune
var nextRuneType runeTokenClass
var err error
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err == io.EOF {
nextRuneType = eofRuneClass
err = nil
} else if err != nil {
return nil, err
}
switch state {
case startState: // no runes read yet
{
switch nextRuneType {
case eofRuneClass:
{
return nil, io.EOF
}
case spaceRuneClass:
{
}
case escapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingState
}
case escapeRuneClass:
{
tokenType = WordToken
state = escapingState
}
case commentRuneClass:
{
tokenType = CommentToken
state = commentState
}
default:
{
tokenType = WordToken
value = append(value, nextRune)
state = inWordState
}
}
}
case inWordState: // in a regular word
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
t.input.UnreadRune()
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
state = quotingState
}
case escapeRuneClass:
{
state = escapingState
}
default:
{
value = append(value, nextRune)
}
}
}
case escapingState: // the rune after an escape character
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = inWordState
value = append(value, nextRune)
}
}
}
case escapingQuotedState: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = quotingEscapingState
value = append(value, nextRune)
}
}
}
case quotingEscapingState: // in escaping double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = inWordState
}
case escapeRuneClass:
{
state = escapingQuotedState
}
default:
{
value = append(value, nextRune)
}
}
}
case quotingState: // in non-escaping single quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case nonEscapingQuoteRuneClass:
{
state = inWordState
}
default:
{
value = append(value, nextRune)
}
}
}
case commentState: // in a comment
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
if nextRune == '\n' {
state = startState
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
} else {
value = append(value, nextRune)
}
}
default:
{
value = append(value, nextRune)
}
}
}
default:
{
return nil, fmt.Errorf("Unexpected state: %v", state)
}
}
}
}
// Next returns the next token in the stream.
func (t *Tokenizer) Next() (*Token, error) {
return t.scanStream()
}
// Split partitions a string into a slice of strings.
func Split(s string) ([]string, error) {
l := NewLexer(strings.NewReader(s))
subStrings := make([]string, 0)
for {
word, err := l.Next()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
}
}

@@ -2,6 +2,7 @@ package iradix
import (
"bytes"
"strings"
"github.com/hashicorp/golang-lru/simplelru"
)
@@ -11,7 +12,9 @@ const (
// cache used per transaction. This is used to cache the updates
// to the nodes near the root, while the leaves do not need to be
// cached. This is important for very large transactions to prevent
// the modified cache from growing to be enormous.
// the modified cache from growing to be enormous. This is also used
// to set the max size of the mutation notify maps since those should
// also be bounded in a similar way.
defaultModifiedCache = 8192
)
@@ -27,7 +30,11 @@ type Tree struct {
// New returns an empty Tree
func New() *Tree {
t := &Tree{root: &Node{}}
t := &Tree{
root: &Node{
mutateCh: make(chan struct{}),
},
}
return t
}
@@ -40,75 +47,208 @@ func (t *Tree) Len() int {
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
root *Node
size int
modified *simplelru.LRU
// root is the modified root for the transaction.
root *Node
// snap is a snapshot of the root node for use if we have to run the
// slow notify algorithm.
snap *Node
// size tracks the size of the tree as it is modified during the
// transaction.
size int
// writable is a cache of writable nodes that have been created during
// the course of the transaction. This allows us to re-use the same
// nodes for further writes and avoid unnecessary copies of nodes that
// have never been exposed outside the transaction. This will only hold
// up to defaultModifiedCache number of entries.
writable *simplelru.LRU
// trackChannels is used to hold channels that need to be notified to
// signal mutation of the tree. This will only hold up to
// defaultModifiedCache number of entries, after which we will set the
// trackOverflow flag, which will cause us to use a more expensive
// algorithm to perform the notifications. Mutation tracking is only
// performed if trackMutate is true.
trackChannels map[chan struct{}]struct{}
trackOverflow bool
trackMutate bool
}
// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
txn := &Txn{
root: t.root,
snap: t.root,
size: t.size,
}
return txn
}
// writeNode returns a node to be modified, if the current
// node has already been modified during the course of
// the transaction, it is used in-place.
func (t *Txn) writeNode(n *Node) *Node {
// Ensure the modified set exists
if t.modified == nil {
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
t.trackMutate = track
}
// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
// In overflow, make sure we don't store any more objects.
if t.trackOverflow {
return
}
// If this would overflow the state we reject it and set the flag (since
// we aren't tracking everything that's required any longer).
if len(t.trackChannels) >= defaultModifiedCache {
// Mark that we are in the overflow state
t.trackOverflow = true
// Clear the map so that the channels can be garbage collected. It is
// safe to do this since we have already overflowed and will be using
// the slow notify algorithm.
t.trackChannels = nil
return
}
// Create the map on the fly when we need it.
if t.trackChannels == nil {
t.trackChannels = make(map[chan struct{}]struct{})
}
// Otherwise we are good to track it.
t.trackChannels[ch] = struct{}{}
}
// writeNode returns a node to be modified, if the current node has already been
// modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
// Ensure the writable set exists.
if t.writable == nil {
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
if err != nil {
panic(err)
}
t.modified = lru
t.writable = lru
}
// If this node has already been modified, we can
// continue to use it during this transaction.
if _, ok := t.modified.Get(n); ok {
// If this node has already been modified, we can continue to use it
// during this transaction. We know that we don't need to track it for
// a node update since the node is writable, but if this is for a leaf
// update we track it, in case the initial write to this node didn't
// update the leaf.
if _, ok := t.writable.Get(n); ok {
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
return n
}
// Copy the existing node
nc := new(Node)
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Copy the existing node. If you have set forLeafUpdate it will be
// safe to replace this leaf with another after you get your node for
// writing. You MUST replace it, because the channel associated with
// this leaf will be closed when this transaction is committed.
nc := &Node{
mutateCh: make(chan struct{}),
leaf: n.leaf,
}
if n.prefix != nil {
nc.prefix = make([]byte, len(n.prefix))
copy(nc.prefix, n.prefix)
}
if n.leaf != nil {
nc.leaf = new(leafNode)
*nc.leaf = *n.leaf
}
if len(n.edges) != 0 {
nc.edges = make([]edge, len(n.edges))
copy(nc.edges, n.edges)
}
// Mark this node as modified
t.modified.Add(n, nil)
// Mark this node as writable.
t.writable.Add(nc, nil)
return nc
}
// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
// Returns the size of the subtree visited
func (t *Txn) trackChannelsAndCount(n *Node) int {
// Count only leaf nodes
leaves := 0
if n.leaf != nil {
leaves = 1
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Recurse on the children
for _, e := range n.edges {
leaves += t.trackChannelsAndCount(e.node)
}
return leaves
}
// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
// Mark the child node as being mutated since we are about to abandon
// it. We don't need to mark the leaf since we are retaining it if it
// is there.
e := n.edges[0]
child := e.node
if t.trackMutate {
t.trackChannel(child.mutateCh)
}
// Merge the nodes.
n.prefix = concat(n.prefix, child.prefix)
n.leaf = child.leaf
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
// Handle key exhaution
// Handle key exhaustion
if len(search) == 0 {
nc := t.writeNode(n)
var oldVal interface{}
didUpdate := false
if n.isLeaf() {
old := nc.leaf.val
nc.leaf.val = v
return nc, old, true
} else {
nc.leaf = &leafNode{
key: k,
val: v,
}
return nc, nil, false
oldVal = n.leaf.val
didUpdate = true
}
nc := t.writeNode(n, true)
nc.leaf = &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
return nc, oldVal, didUpdate
}
// Look for the edge
@@ -119,14 +259,16 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
e := edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: &leafNode{
key: k,
val: v,
mutateCh: make(chan struct{}),
key: k,
val: v,
},
prefix: search,
},
}
nc := t.writeNode(n)
nc := t.writeNode(n, false)
nc.addEdge(e)
return nc, nil, false
}
@@ -137,7 +279,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
search = search[commonPrefix:]
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
if newChild != nil {
nc := t.writeNode(n)
nc := t.writeNode(n, false)
nc.edges[idx].node = newChild
return nc, oldVal, didUpdate
}
@@ -145,9 +287,10 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
}
// Split the node
nc := t.writeNode(n)
nc := t.writeNode(n, false)
splitNode := &Node{
prefix: search[:commonPrefix],
mutateCh: make(chan struct{}),
prefix: search[:commonPrefix],
}
nc.replaceEdge(edge{
label: search[0],
@@ -155,7 +298,7 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
})
// Restore the existing child node
modChild := t.writeNode(child)
modChild := t.writeNode(child, false)
splitNode.addEdge(edge{
label: modChild.prefix[commonPrefix],
node: modChild,
@@ -164,8 +307,9 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
// Create a new leaf node
leaf := &leafNode{
key: k,
val: v,
mutateCh: make(chan struct{}),
key: k,
val: v,
}
// If the new key is a subset, add it to this node
@@ -179,8 +323,9 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
splitNode.addEdge(edge{
label: search[0],
node: &Node{
leaf: leaf,
prefix: search,
mutateCh: make(chan struct{}),
leaf: leaf,
prefix: search,
},
})
return nc, nil, false
@@ -188,19 +333,19 @@ func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface
// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
// Check for key exhaution
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
return nil, nil
}
// Remove the leaf node
nc := t.writeNode(n)
nc := t.writeNode(n, true)
nc.leaf = nil
// Check if this node should be merged
if n != t.root && len(nc.edges) == 1 {
nc.mergeChild()
t.mergeChild(nc)
}
return nc, n.leaf
}
@@ -219,14 +364,17 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
return nil, nil
}
// Copy this node
nc := t.writeNode(n)
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
nc.mergeChild()
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
@@ -234,6 +382,56 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
return nc, leaf
}
// deletePrefix does a recursive deletion of all nodes under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
// Check for key exhaustion
if len(search) == 0 {
nc := t.writeNode(n, true)
if n.isLeaf() {
nc.leaf = nil
}
nc.edges = nil
return nc, t.trackChannelsAndCount(n)
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
return nil, 0
}
// Consume the search prefix
if len(child.prefix) > len(search) {
search = []byte("")
} else {
search = search[len(child.prefix):]
}
newChild, numDeletions := t.deletePrefix(n, child, search)
if newChild == nil {
return nil, 0
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, numDeletions
}
// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
@@ -261,6 +459,19 @@ func (t *Txn) Delete(k []byte) (interface{}, bool) {
return nil, false
}
// DeletePrefix is used to delete an entire subtree that matches the prefix
// This will delete all nodes under that prefix
func (t *Txn) DeletePrefix(prefix []byte) bool {
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
if newRoot != nil {
t.root = newRoot
t.size = t.size - numDeletions
return true
}
return false
}
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
@@ -274,10 +485,115 @@ func (t *Txn) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// Commit is used to finalize the transaction and return a new tree
// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
return t.root.GetWatch(k)
}
// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
t.modified = nil
return &Tree{t.root, t.size}
nt := t.CommitOnly()
if t.trackMutate {
t.Notify()
}
return nt
}
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
nt := &Tree{t.root, t.size}
t.writable = nil
return nt
}
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
snapIter := t.snap.rawIterator()
rootIter := t.root.rawIterator()
for snapIter.Front() != nil || rootIter.Front() != nil {
// If we've exhausted the nodes in the old snapshot, we know
// there's nothing remaining to notify.
if snapIter.Front() == nil {
return
}
snapElem := snapIter.Front()
// If we've exhausted the nodes in the new root, we know we need
// to invalidate everything that remains in the old snapshot. We
// know from the loop condition there's something in the old
// snapshot.
if rootIter.Front() == nil {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// Do one string compare so we can check the various conditions
// below without repeating the compare.
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
// If the snapshot is behind the root, then we must have deleted
// this node during the transaction.
if cmp < 0 {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// If the snapshot is ahead of the root, then we must have added
// this node during the transaction.
if cmp > 0 {
rootIter.Next()
continue
}
// If we have the same path, then we need to see if we mutated a
// node and possibly the leaf.
rootElem := rootIter.Front()
if snapElem != rootElem {
close(snapElem.mutateCh)
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
close(snapElem.leaf.mutateCh)
}
}
snapIter.Next()
rootIter.Next()
}
}
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
if !t.trackMutate {
return
}
// If we've overflowed the tracking state we can't use it in any way and
// need to do a full tree compare.
if t.trackOverflow {
t.slowNotify()
} else {
for ch := range t.trackChannels {
close(ch)
}
}
// Clean up the tracking state so that a re-notify is safe (will trigger
// the else clause above which will be a no-op).
t.trackChannels = nil
t.trackOverflow = false
}
// Insert is used to add or update a given key. The return provides
@@ -296,6 +612,14 @@ func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
return txn.Commit(), old, ok
}
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
txn := t.Txn()
ok := txn.DeletePrefix(k)
return txn.Commit(), ok
}
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {

@@ -9,11 +9,13 @@ type Iterator struct {
stack []edges
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
// Wipe the stack
i.stack = nil
n := i.node
watch = n.mutateCh
search := prefix
for {
// Check for key exhaustion
@@ -29,6 +31,9 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
return
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
@@ -43,6 +48,11 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
}
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed

@@ -12,8 +12,9 @@ type WalkFn func(k []byte, v interface{}) bool
// leafNode is used to represent a value
type leafNode struct {
key []byte
val interface{}
mutateCh chan struct{}
key []byte
val interface{}
}
// edge is used to represent an edge node
@@ -24,6 +25,9 @@ type edge struct {
// Node is an immutable node in the radix tree
type Node struct {
// mutateCh is closed if this node is modified
mutateCh chan struct{}
// leaf is used to store possible leaf
leaf *leafNode
@@ -87,31 +91,14 @@ func (n *Node) delEdge(label byte) {
}
}
func (n *Node) mergeChild() {
e := n.edges[0]
child := e.node
n.prefix = concat(n.prefix, child.prefix)
if child.leaf != nil {
n.leaf = new(leafNode)
*n.leaf = *child.leaf
} else {
n.leaf = nil
}
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
func (n *Node) Get(k []byte) (interface{}, bool) {
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
search := k
watch := n.mutateCh
for {
// Check for key exhaution
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.val, true
return n.leaf.mutateCh, n.leaf.val, true
}
break
}
@@ -122,6 +109,9 @@ func (n *Node) Get(k []byte) (interface{}, bool) {
break
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
@@ -129,7 +119,12 @@ func (n *Node) Get(k []byte) (interface{}, bool) {
break
}
}
return nil, false
return watch, nil, false
}
func (n *Node) Get(k []byte) (interface{}, bool) {
_, val, ok := n.GetWatch(k)
return val, ok
}
// LongestPrefix is like Get, but instead of an
@@ -204,6 +199,14 @@ func (n *Node) Iterator() *Iterator {
return &Iterator{node: n}
}
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
iter := &rawIterator{node: n}
iter.Next()
return iter
}
// Walk is used to walk the tree
func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn)
@@ -271,6 +274,66 @@ func (n *Node) WalkPath(path []byte, fn WalkFn) {
}
}
func (n *Node) Seek(prefix []byte) *Seeker {
search := prefix
p := &pos{n: n}
for {
// Check for key exhaustion
if len(search) == 0 {
return &Seeker{p}
}
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= search[0]
})
p.current = idx
if idx < len(n.edges) {
n = n.edges[idx].node
if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 {
search = search[len(n.prefix):]
p.current++
p = &pos{n: n, prev: p}
continue
}
}
p.current++
return &Seeker{p}
}
}
type Seeker struct {
*pos
}
type pos struct {
n *Node
current int
prev *pos
isLeaf bool
}
func (s *Seeker) Next() (k []byte, v interface{}, ok bool) {
if s.current >= len(s.n.edges) {
if s.prev == nil {
return nil, nil, false
}
s.pos = s.prev
return s.Next()
}
edge := s.n.edges[s.current]
s.current++
if edge.node.leaf != nil && !s.isLeaf {
s.isLeaf = true
s.current--
return edge.node.leaf.key, edge.node.leaf.val, true
}
s.isLeaf = false
s.pos = &pos{n: edge.node, prev: s.pos}
return s.Next()
}
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *Node, fn WalkFn) bool {

@@ -0,0 +1,78 @@
package iradix
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
// node is the starting node in the tree for the iterator.
node *Node
// stack keeps track of edges in the frontier.
stack []rawStackEntry
// pos is the current position of the iterator.
pos *Node
// path is the effective path of the current iterator position,
// regardless of whether the current node is a leaf.
path string
}
// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
path string
edges edges
}
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
return i.pos
}
// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
return i.path
}
// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
// Initialize our stack if needed.
if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{
rawStackEntry{
edges: edges{
edge{node: i.node},
},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack.
n := len(i.stack)
last := i.stack[n-1]
elem := last.edges[0].node
// Update the stack.
if len(last.edges) > 1 {
i.stack[n-1].edges = last.edges[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier.
if len(elem.edges) > 0 {
path := last.path + string(elem.prefix)
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
}
i.pos = elem
i.path = last.path + string(elem.prefix)
return
}
i.pos = nil
i.path = ""
}

vendor/github.com/mitchellh/hashstructure/LICENSE (generated, vendored; 21 lines added)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

65
vendor/github.com/mitchellh/hashstructure/README.md generated vendored Normal file
View file

@@ -0,0 +1,65 @@
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
hashstructure is a Go library for creating a unique hash value
for arbitrary values in Go.
This can be used to key complex values in a hash (for use in a map, set,
etc.). Common use cases include comparing two values without sending the
data across the network, caching values locally (de-duplication), and so on.
## Features
* Hash any arbitrary Go value, including complex types.
* Tag a struct field to ignore it and not affect the hash value.
* Tag a slice type struct field to treat it as a set where ordering
doesn't affect the hash code but the field itself is still taken into
account to create the hash value.
* Optionally specify a custom hash function to optimize for speed, collision
avoidance for your data set, etc.
* Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
allowing effective hashing of time.Time.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/hashstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
A quick code example is shown below:
```go
type ComplexStruct struct {
Name string
Age uint
Metadata map[string]interface{}
}
v := ComplexStruct{
Name: "mitchellh",
Age: 64,
Metadata: map[string]interface{}{
"car": true,
"location": "California",
"siblings": []string{"Bob", "John"},
},
}
hash, err := hashstructure.Hash(v, nil)
if err != nil {
panic(err)
}
fmt.Printf("%d", hash)
// Output:
// 2307517237273902113
```
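The struct tags listed under Features work the same way. A minimal sketch
(the `User` type here is hypothetical; the behavior is as documented above):

```go
type User struct {
	Name    string
	Session string   `hash:"ignore"` // excluded from the hash
	Roles   []string `hash:"set"`    // ordering does not affect the hash
}

a, _ := hashstructure.Hash(User{Name: "a", Roles: []string{"x", "y"}}, nil)
b, _ := hashstructure.Hash(User{Name: "a", Session: "s", Roles: []string{"y", "x"}}, nil)
// a == b: Session is ignored and Roles is hashed as a set.
```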

View file

@@ -0,0 +1,358 @@
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
Field string
}
// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to FNV.
Hasher hash.Hash64
// TagName is the struct tag to look at when hashing the structure.
// By default this is "hash".
TagName string
// ZeroNil is a flag determining whether a nil pointer should be treated
// as equal to the zero value of the pointed-to type. By default this is false.
ZeroNil bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = fnv.New64()
}
if opts.TagName == "" {
opts.TagName = "hash"
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{
h: opts.Hasher,
tag: opts.TagName,
zeronil: opts.ZeroNil,
}
return w.visit(reflect.ValueOf(v), nil)
}
type walker struct {
h hash.Hash64
tag string
zeronil bool
}
type visitOpts struct {
// Flags are a bitmask of flags to affect behavior of this visit
Flags visitFlag
// Information about the struct containing this field
Struct interface{}
StructField string
}
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
t := reflect.TypeOf(0)
// Loop since these can be wrapped in multiple layers of pointers
// and interfaces.
for {
// If we have an interface, dereference it. We have to do this up
// here because it might be a nil in there and the check below must
// catch that.
if v.Kind() == reflect.Interface {
v = v.Elem()
continue
}
if v.Kind() == reflect.Ptr {
if w.zeronil {
t = v.Type().Elem()
}
v = reflect.Indirect(v)
continue
}
break
}
// If it is nil, treat it like a zero.
if !v.IsValid() {
v = reflect.Zero(t)
}
// binary.Write can't encode unsized ints, so we convert to
// a sized int, choosing the largest...
switch v.Kind() {
case reflect.Int:
v = reflect.ValueOf(int64(v.Int()))
case reflect.Uint:
v = reflect.ValueOf(uint64(v.Uint()))
case reflect.Bool:
var tmp int8
if v.Bool() {
tmp = 1
}
v = reflect.ValueOf(tmp)
}
k := v.Kind()
// We can shortcut numeric values by directly binary writing them
if k >= reflect.Int && k <= reflect.Complex64 {
// A direct hash calculation
w.h.Reset()
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
return w.h.Sum64(), err
}
switch k {
case reflect.Array:
var h uint64
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
h = hashUpdateOrdered(w.h, h, current)
}
return h, nil
case reflect.Map:
var includeMap IncludableMap
if opts != nil && opts.Struct != nil {
if v, ok := opts.Struct.(IncludableMap); ok {
includeMap = v
}
}
// Build the hash for the map. We do this by XOR-ing all the key
// and value hashes. This makes it deterministic despite ordering.
var h uint64
for _, k := range v.MapKeys() {
v := v.MapIndex(k)
if includeMap != nil {
incl, err := includeMap.HashIncludeMap(
opts.StructField, k.Interface(), v.Interface())
if err != nil {
return 0, err
}
if !incl {
continue
}
}
kh, err := w.visit(k, nil)
if err != nil {
return 0, err
}
vh, err := w.visit(v, nil)
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
return h, nil
case reflect.Struct:
parent := v.Interface()
var include Includable
if impl, ok := parent.(Includable); ok {
include = impl
}
t := v.Type()
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
if err != nil {
return 0, err
}
l := v.NumField()
for i := 0; i < l; i++ {
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
var f visitFlag
fieldType := t.Field(i)
if fieldType.PkgPath != "" {
// Unexported
continue
}
tag := fieldType.Tag.Get(w.tag)
if tag == "ignore" || tag == "-" {
// Ignore this field
continue
}
// if string is set, use the string value
if tag == "string" {
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
innerV = reflect.ValueOf(impl.String())
} else {
return 0, &ErrNotStringer{
Field: v.Type().Field(i).Name,
}
}
}
// Check if we implement includable and check it
if include != nil {
incl, err := include.HashInclude(fieldType.Name, innerV)
if err != nil {
return 0, err
}
if !incl {
continue
}
}
switch tag {
case "set":
f |= visitFlagSet
}
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
if err != nil {
return 0, err
}
vh, err := w.visit(innerV, &visitOpts{
Flags: f,
Struct: parent,
StructField: fieldType.Name,
})
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
}
return h, nil
case reflect.Slice:
// We have two behaviors here. If it isn't a set, then we just
// visit all the elements. If it is a set, then we do a deterministic
// hash code.
var h uint64
var set bool
if opts != nil {
set = (opts.Flags & visitFlagSet) != 0
}
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
if set {
h = hashUpdateUnordered(h, current)
} else {
h = hashUpdateOrdered(w.h, h, current)
}
}
return h, nil
case reflect.String:
// Directly hash
w.h.Reset()
_, err := w.h.Write([]byte(v.String()))
return w.h.Sum64(), err
default:
return 0, fmt.Errorf("unknown kind to hash: %s", k)
}
}
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
// For ordered updates, use a real hash function
h.Reset()
// We panic if the binary writes fail, since writing a fixed-size
// int64 should never fail.
e1 := binary.Write(h, binary.LittleEndian, a)
e2 := binary.Write(h, binary.LittleEndian, b)
if e1 != nil {
panic(e1)
}
if e2 != nil {
panic(e2)
}
return h.Sum64()
}
func hashUpdateUnordered(a, b uint64) uint64 {
return a ^ b
}
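// For example, when hashing a map each entry contributes
// hashUpdateOrdered(w.h, hash(key), hash(value)), and the entry hashes are
// then combined with hashUpdateUnordered. Because XOR is commutative
// (h1 ^ h2 == h2 ^ h1), map iteration order cannot affect the result.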
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint
const (
visitFlagInvalid visitFlag = iota
visitFlagSet = iota << 1
)

15
vendor/github.com/mitchellh/hashstructure/include.go generated vendored Normal file
View file

@@ -0,0 +1,15 @@
package hashstructure
// Includable is an interface that can optionally be implemented by
// a struct. It will be called for each field in the struct to check whether
// it should be included in the hash.
type Includable interface {
HashInclude(field string, v interface{}) (bool, error)
}
// IncludableMap is an interface that can optionally be implemented by
// a struct. It will be called when a map-type field is found to ask the
// struct if the map item should be included in the hash.
type IncludableMap interface {
HashIncludeMap(field string, k, v interface{}) (bool, error)
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,121 @@
syntax = "proto3";
package moby.buildkit.v1;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service Control {
rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
rpc Prune(PruneRequest) returns (stream UsageRecord);
rpc Solve(SolveRequest) returns (SolveResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Session(stream BytesMessage) returns (stream BytesMessage);
rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
}
message PruneRequest {
// TODO: filter
}
message DiskUsageRequest {
string filter = 1; // FIXME: this should be containerd-compatible repeated string?
}
message DiskUsageResponse {
repeated UsageRecord record = 1;
}
message UsageRecord {
string ID = 1;
bool Mutable = 2;
bool InUse = 3;
int64 Size = 4;
string Parent = 5;
google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
int64 UsageCount = 8;
string Description = 9;
}
message SolveRequest {
string Ref = 1;
pb.Definition Definition = 2;
string Exporter = 3;
map<string, string> ExporterAttrs = 4;
string Session = 5;
string Frontend = 6;
map<string, string> FrontendAttrs = 7;
CacheOptions Cache = 8 [(gogoproto.nullable) = false];
}
message CacheOptions {
string ExportRef = 1;
repeated string ImportRefs = 2;
map<string, string> ExportAttrs = 3;
}
message SolveResponse {
map<string, string> ExporterResponse = 1;
}
message StatusRequest {
string Ref = 1;
}
message StatusResponse {
repeated Vertex vertexes = 1;
repeated VertexStatus statuses = 2;
repeated VertexLog logs = 3;
}
message Vertex {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
bool cached = 4;
google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
string error = 7; // typed errors?
}
message VertexStatus {
string ID = 1;
string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
int64 current = 4;
int64 total = 5;
// TODO: add started, completed
google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
}
message VertexLog {
string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
int64 stream = 3;
bytes msg = 4;
}
message BytesMessage {
bytes data = 1;
}
message ListWorkersRequest {
repeated string filter = 1; // containerd style
}
message ListWorkersResponse {
repeated WorkerRecord record = 1;
}
message WorkerRecord {
string ID = 1;
map<string, string> Labels = 2;
}

View file

@@ -0,0 +1,3 @@
package moby_buildkit_v1
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
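For orientation, a minimal client sketch against the Control service, assuming the gRPC stubs produced by the protoc invocation above (`addr` and the dial options are illustrative, not buildkit's actual client wiring):

conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
	return err
}
defer conn.Close()
c := moby_buildkit_v1.NewControlClient(conn)
du, err := c.DiskUsage(context.Background(), &moby_buildkit_v1.DiskUsageRequest{})
if err != nil {
	return err
}
for _, r := range du.Record {
	fmt.Println(r.ID, r.Size, r.InUse)
}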

View file

@@ -0,0 +1,634 @@
package contenthash
import (
"bytes"
"context"
"crypto/sha256"
"io"
"os"
"path"
"path/filepath"
"sync"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/locker"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)
var errNotFound = errors.Errorf("not found")
var defaultManager *cacheManager
var defaultManagerOnce sync.Once
const keyContentHash = "buildkit.contenthash.v0"
func getDefaultManager() *cacheManager {
defaultManagerOnce.Do(func() {
lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size
defaultManager = &cacheManager{lru: lru, locker: locker.New()}
})
return defaultManager
}
// Layout in the radix tree: every path is saved under its cleaned absolute
// Unix path. Directories have two records: one holds the digest for the
// directory header, the other the recursive digest for the directory
// contents. "/dir/" is the record for the header and "/dir" for the
// contents. For the root node, "" (the empty string) is the key for the
// root contents and "/" for the root header.
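// For example, after inserting "/dir/file" the tree contains:
//
//	""          -> recursive digest for the whole tree contents
//	"/"         -> digest for the root header
//	"/dir"      -> recursive digest for the directory contents
//	"/dir/"     -> digest for the directory header
//	"/dir/file" -> digest for the file
//
// (in the stored keys every "/" is replaced with a zero byte; see
// convertPathToKey at the bottom of this file)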
func Checksum(ctx context.Context, ref cache.ImmutableRef, path string) (digest.Digest, error) {
return getDefaultManager().Checksum(ctx, ref, path)
}
func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
return getDefaultManager().GetCacheContext(ctx, md)
}
func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
return getDefaultManager().SetCacheContext(ctx, md, cc)
}
type CacheContext interface {
Checksum(ctx context.Context, ref cache.Mountable, p string) (digest.Digest, error)
HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
}
type Hashed interface {
Digest() digest.Digest
}
type cacheManager struct {
locker *locker.Locker
lru *simplelru.LRU
lruMu sync.Mutex
}
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) {
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()))
if err != nil {
return "", nil
}
return cc.Checksum(ctx, ref, p)
}
func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
cm.locker.Lock(md.ID())
cm.lruMu.Lock()
v, ok := cm.lru.Get(md.ID())
cm.lruMu.Unlock()
if ok {
cm.locker.Unlock(md.ID())
return v.(*cacheContext), nil
}
cc, err := newCacheContext(md)
if err != nil {
cm.locker.Unlock(md.ID())
return nil, err
}
cm.lruMu.Lock()
cm.lru.Add(md.ID(), cc)
cm.lruMu.Unlock()
cm.locker.Unlock(md.ID())
return cc, nil
}
func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error {
cc, ok := cci.(*cacheContext)
if !ok {
return errors.Errorf("invalid cachecontext: %T", cc)
}
if md.ID() != cc.md.ID() {
cc = &cacheContext{
md: md,
tree: cci.(*cacheContext).tree,
dirtyMap: map[string]struct{}{},
}
} else {
if err := cc.save(); err != nil {
return err
}
}
cm.lruMu.Lock()
cm.lru.Add(md.ID(), cc)
cm.lruMu.Unlock()
return nil
}
type cacheContext struct {
mu sync.RWMutex
md *metadata.StorageItem
tree *iradix.Tree
dirty bool // needs to be persisted to disk
// used in HandleChange
txn *iradix.Txn
node *iradix.Node
dirtyMap map[string]struct{}
}
type mount struct {
mountable cache.Mountable
mountPath string
unmount func() error
}
func (m *mount) mount(ctx context.Context) (string, error) {
if m.mountPath != "" {
return m.mountPath, nil
}
mounts, err := m.mountable.Mount(ctx, true)
if err != nil {
return "", err
}
lm := snapshot.LocalMounter(mounts)
mp, err := lm.Mount()
if err != nil {
return "", err
}
m.mountPath = mp
m.unmount = lm.Unmount
return mp, nil
}
func (m *mount) clean() error {
if m.mountPath != "" {
if err := m.unmount(); err != nil {
return err
}
m.mountPath = ""
}
return nil
}
func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) {
cc := &cacheContext{
md: md,
tree: iradix.New(),
dirtyMap: map[string]struct{}{},
}
if err := cc.load(); err != nil {
return nil, err
}
return cc, nil
}
func (cc *cacheContext) load() error {
dt, err := cc.md.GetExternal(keyContentHash)
if err != nil {
return nil
}
var l CacheRecords
if err := l.Unmarshal(dt); err != nil {
return err
}
txn := cc.tree.Txn()
for _, p := range l.Paths {
txn.Insert([]byte(p.Path), p.Record)
}
cc.tree = txn.Commit()
return nil
}
func (cc *cacheContext) save() error {
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn != nil {
cc.commitActiveTransaction()
}
var l CacheRecords
node := cc.tree.Root()
node.Walk(func(k []byte, v interface{}) bool {
l.Paths = append(l.Paths, &CacheRecordWithPath{
Path: string(k),
Record: v.(*CacheRecord),
})
return false
})
dt, err := l.Marshal()
if err != nil {
return err
}
return cc.md.SetExternal(keyContentHash, dt)
}
// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
p = path.Join("/", filepath.ToSlash(p))
if p == "/" {
p = ""
}
k := convertPathToKey([]byte(p))
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordTypeDir {
cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool {
cc.txn.Delete(k)
return false
})
}
}
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn == nil {
cc.txn = cc.tree.Txn()
cc.node = cc.tree.Root()
// the root is never passed to HandleChange, so fake it here
if _, ok := cc.node.Get([]byte{0}); !ok {
cc.txn.Insert([]byte{0}, &CacheRecord{
Type: CacheRecordTypeDirHeader,
Digest: digest.FromBytes(nil),
})
cc.txn.Insert([]byte(""), &CacheRecord{
Type: CacheRecordTypeDir,
})
}
}
if kind == fsutil.ChangeKindDelete {
v, ok := cc.txn.Delete(k)
if ok {
deleteDir(v.(*CacheRecord))
}
d := path.Dir(p)
if d == "/" {
d = ""
}
cc.dirtyMap[d] = struct{}{}
return
}
stat, ok := fi.Sys().(*fsutil.Stat)
if !ok {
return errors.Errorf("%s invalid change without stat information", p)
}
h, ok := fi.(Hashed)
if !ok {
return errors.Errorf("invalid fileinfo: %s", p)
}
v, ok := cc.node.Get(k)
if ok {
deleteDir(v.(*CacheRecord))
}
cr := &CacheRecord{
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordTypeSymlink
cr.Linkname = filepath.ToSlash(stat.Linkname)
}
if fi.IsDir() {
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordTypeDir,
}
cc.txn.Insert(k, cr2)
k = append(k, 0)
p += "/"
}
cr.Digest = h.Digest()
cc.txn.Insert(k, cr)
d := path.Dir(p)
if d == "/" {
d = ""
}
cc.dirtyMap[d] = struct{}{}
return nil
}
func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string) (digest.Digest, error) {
m := &mount{mountable: mountable}
defer m.clean()
const maxSymlinkLimit = 255
i := 0
for {
if i > maxSymlinkLimit {
return "", errors.Errorf("too many symlinks: %s", p)
}
cr, err := cc.checksumNoFollow(ctx, m, p)
if err != nil {
return "", err
}
if cr.Type == CacheRecordTypeSymlink {
link := cr.Linkname
if !path.IsAbs(cr.Linkname) {
link = path.Join(path.Dir(p), link)
}
i++
p = link
} else {
return cr.Digest, nil
}
}
}
func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
p = path.Join("/", filepath.ToSlash(p))
if p == "/" {
p = ""
}
cc.mu.RLock()
if cc.txn == nil {
root := cc.tree.Root()
cc.mu.RUnlock()
v, ok := root.Get(convertPathToKey([]byte(p)))
if ok {
cr := v.(*CacheRecord)
if cr.Digest != "" {
return cr, nil
}
}
} else {
cc.mu.RUnlock()
}
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.txn != nil {
cc.commitActiveTransaction()
}
defer func() {
if cc.dirty {
go cc.save()
cc.dirty = false
}
}()
return cc.lazyChecksum(ctx, m, p)
}
func (cc *cacheContext) commitActiveTransaction() {
for d := range cc.dirtyMap {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
k := convertPathToKey([]byte(d))
if _, ok := cc.txn.Get(k); ok {
cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
}
}
cc.tree = cc.txn.Commit()
cc.node = nil
cc.dirtyMap = map[string]struct{}{}
cc.txn = nil
}
func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
root := cc.tree.Root()
if cc.needsScan(root, p) {
if err := cc.scanPath(ctx, m, p); err != nil {
return nil, err
}
}
k := convertPathToKey([]byte(p))
txn := cc.tree.Txn()
root = txn.Root()
cr, updated, err := cc.checksum(ctx, root, txn, m, k)
if err != nil {
return nil, err
}
cc.tree = txn.Commit()
cc.dirty = updated
return cr, err
}
func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte) (*CacheRecord, bool, error) {
v, ok := root.Get(k)
if !ok {
return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k))
}
cr := v.(*CacheRecord)
if cr.Digest != "" {
return cr, false, nil
}
var dgst digest.Digest
switch cr.Type {
case CacheRecordTypeDir:
h := sha256.New()
next := append(k, 0)
iter := root.Seek(next)
subk := next
ok := true
for {
if !ok || !bytes.HasPrefix(subk, next) {
break
}
h.Write(bytes.TrimPrefix(subk, k))
subcr, _, err := cc.checksum(ctx, root, txn, m, subk)
if err != nil {
return nil, false, err
}
h.Write([]byte(subcr.Digest))
if subcr.Type == CacheRecordTypeDir { // skip subfiles
next := append(subk, 0, 0xff)
iter = root.Seek(next)
}
subk, _, ok = iter.Next()
}
dgst = digest.NewDigest(digest.SHA256, h)
default:
p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
target, err := m.mount(ctx)
if err != nil {
return nil, false, err
}
// no FollowSymlinkInScope because invalid paths should not be inserted
fp := filepath.Join(target, filepath.FromSlash(p))
fi, err := os.Lstat(fp)
if err != nil {
return nil, false, err
}
dgst, err = prepareDigest(fp, p, fi)
if err != nil {
return nil, false, err
}
}
cr2 := &CacheRecord{
Digest: dgst,
Type: cr.Type,
Linkname: cr.Linkname,
}
txn.Insert(k, cr2)
return cr2, true, nil
}
func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool {
if p == "/" {
p = ""
}
if _, ok := root.Get(convertPathToKey([]byte(p))); !ok {
if p == "" {
return true
}
return cc.needsScan(root, path.Clean(path.Dir(p)))
}
return false
}
func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
p = path.Join("/", p)
d, _ := path.Split(p)
mp, err := m.mount(ctx)
if err != nil {
return err
}
parentPath, err := fs.RootPath(mp, filepath.FromSlash(d))
if err != nil {
return err
}
n := cc.tree.Root()
txn := cc.tree.Txn()
err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return errors.Wrapf(err, "failed to walk %s", path)
}
rel, err := filepath.Rel(mp, path)
if err != nil {
return err
}
k := []byte(filepath.Join("/", filepath.ToSlash(rel)))
if string(k) == "/" {
k = []byte{}
}
k = convertPathToKey(k)
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordTypeSymlink
link, err := os.Readlink(path)
if err != nil {
return err
}
cr.Linkname = filepath.ToSlash(link)
}
if fi.IsDir() {
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordTypeDir,
}
txn.Insert(k, cr2)
k = append(k, 0)
}
txn.Insert(k, cr)
}
return nil
})
if err != nil {
return err
}
cc.tree = txn.Commit()
return nil
}
func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) {
h, err := NewFileHash(fp, fi)
if err != nil {
return "", errors.Wrapf(err, "failed to create hash for %s", p)
}
if fi.Mode().IsRegular() && fi.Size() > 0 {
// TODO: would be nice to put the contents to separate hash first
// so it can be cached for hardlinks
f, err := os.Open(fp)
if err != nil {
return "", errors.Wrapf(err, "failed to open %s", p)
}
defer f.Close()
if _, err := poolsCopy(h, f); err != nil {
return "", errors.Wrapf(err, "failed to copy file data for %s", p)
}
}
return digest.NewDigest(digest.SHA256, h), nil
}
func addParentToMap(d string, m map[string]struct{}) {
if d == "" {
return
}
d = path.Dir(d)
if d == "/" {
d = ""
}
m[d] = struct{}{}
addParentToMap(d, m)
}
func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem {
v := md.Get("cache.equalMutable") // TODO: const
if v == nil {
return md
}
var mutable string
if err := v.Unmarshal(&mutable); err != nil {
return md
}
si, ok := md.Storage().Get(mutable)
if ok {
return si
}
return md
}
var pool32K = sync.Pool{
New: func() interface{} { return make([]byte, 32*1024) }, // 32K
}
func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
buf := pool32K.Get().([]byte)
written, err = io.CopyBuffer(dst, src, buf)
pool32K.Put(buf)
return
}
func convertPathToKey(p []byte) []byte {
return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1)
}
func convertKeyToPath(p []byte) []byte {
return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1)
}
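A usage sketch of the exported entry point, assuming `ref` is a cache.ImmutableRef (the path is hypothetical):

dgst, err := contenthash.Checksum(ctx, ref, "/src/app/main.go")
if err != nil {
	return err
}
fmt.Println(dgst) // e.g. "sha256:..."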

View file

@@ -0,0 +1,755 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: checksum.proto
/*
Package contenthash is a generated protocol buffer package.
It is generated from these files:
checksum.proto
It has these top-level messages:
CacheRecord
CacheRecordWithPath
CacheRecords
*/
package contenthash
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CacheRecordType int32
const (
CacheRecordTypeFile CacheRecordType = 0
CacheRecordTypeDir CacheRecordType = 1
CacheRecordTypeDirHeader CacheRecordType = 2
CacheRecordTypeSymlink CacheRecordType = 3
)
var CacheRecordType_name = map[int32]string{
0: "FILE",
1: "DIR",
2: "DIR_HEADER",
3: "SYMLINK",
}
var CacheRecordType_value = map[string]int32{
"FILE": 0,
"DIR": 1,
"DIR_HEADER": 2,
"SYMLINK": 3,
}
func (x CacheRecordType) String() string {
return proto.EnumName(CacheRecordType_name, int32(x))
}
func (CacheRecordType) EnumDescriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
type CacheRecord struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"`
Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"`
}
func (m *CacheRecord) Reset() { *m = CacheRecord{} }
func (m *CacheRecord) String() string { return proto.CompactTextString(m) }
func (*CacheRecord) ProtoMessage() {}
func (*CacheRecord) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
func (m *CacheRecord) GetType() CacheRecordType {
if m != nil {
return m.Type
}
return CacheRecordTypeFile
}
func (m *CacheRecord) GetLinkname() string {
if m != nil {
return m.Linkname
}
return ""
}
type CacheRecordWithPath struct {
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
Record *CacheRecord `protobuf:"bytes,2,opt,name=record" json:"record,omitempty"`
}
func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} }
func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) }
func (*CacheRecordWithPath) ProtoMessage() {}
func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{1} }
func (m *CacheRecordWithPath) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *CacheRecordWithPath) GetRecord() *CacheRecord {
if m != nil {
return m.Record
}
return nil
}
type CacheRecords struct {
Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
}
func (m *CacheRecords) Reset() { *m = CacheRecords{} }
func (m *CacheRecords) String() string { return proto.CompactTextString(m) }
func (*CacheRecords) ProtoMessage() {}
func (*CacheRecords) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{2} }
func (m *CacheRecords) GetPaths() []*CacheRecordWithPath {
if m != nil {
return m.Paths
}
return nil
}
func init() {
proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord")
proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath")
proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords")
proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value)
}
func (m *CacheRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Digest) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
if m.Type != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Type))
}
if len(m.Linkname) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname)))
i += copy(dAtA[i:], m.Linkname)
}
return i, nil
}
func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Path) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
if m.Record != nil {
dAtA[i] = 0x12
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Record.Size()))
n1, err := m.Record.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *CacheRecords) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Paths) > 0 {
for _, msg := range m.Paths {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *CacheRecord) Size() (n int) {
var l int
_ = l
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Type != 0 {
n += 1 + sovChecksum(uint64(m.Type))
}
l = len(m.Linkname)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecordWithPath) Size() (n int) {
var l int
_ = l
l = len(m.Path)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Record != nil {
l = m.Record.Size()
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecords) Size() (n int) {
var l int
_ = l
if len(m.Paths) > 0 {
for _, e := range m.Paths {
l = e.Size()
n += 1 + l + sovChecksum(uint64(l))
}
}
return n
}
func sovChecksum(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozChecksum(x uint64) (n int) {
return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *CacheRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= (CacheRecordType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Linkname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Record == nil {
m.Record = &CacheRecord{}
}
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecords) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Paths = append(m.Paths, &CacheRecordWithPath{})
if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipChecksum(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthChecksum
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipChecksum(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("checksum.proto", fileDescriptorChecksum) }
var fileDescriptorChecksum = []byte{
// 418 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xd4, 0x40,
0x18, 0xc7, 0x77, 0xba, 0xeb, 0xaa, 0xdf, 0x4a, 0x0d, 0x53, 0x68, 0xc3, 0x50, 0xb2, 0xe3, 0x5e,
0x5c, 0x8a, 0xcd, 0x96, 0x08, 0xde, 0xad, 0xd9, 0xa5, 0xd1, 0x2a, 0x32, 0x15, 0x44, 0x3c, 0x48,
0x36, 0x3b, 0x66, 0x42, 0x9b, 0x4c, 0x48, 0x66, 0x0f, 0xfb, 0x06, 0x92, 0x93, 0x2f, 0x90, 0x93,
0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xd1, 0xb3, 0x87, 0x22, 0xeb, 0x8b, 0x48, 0x26, 0x55, 0x42, 0xca,
0x9e, 0xe6, 0xfb, 0x66, 0x7e, 0xdf, 0xff, 0xff, 0x9f, 0x61, 0x60, 0x3b, 0x10, 0x3c, 0x38, 0xcf,
0x97, 0xb1, 0x9d, 0x66, 0x52, 0x49, 0x3c, 0x08, 0x64, 0xa2, 0x78, 0xa2, 0x84, 0x9f, 0x0b, 0x72,
0x18, 0x46, 0x4a, 0x2c, 0xe7, 0x76, 0x20, 0xe3, 0x49, 0x28, 0x43, 0x39, 0xd1, 0xcc, 0x7c, 0xf9,
0x51, 0x77, 0xba, 0xd1, 0x55, 0x3d, 0x3b, 0xfa, 0x86, 0x60, 0xf0, 0xcc, 0x0f, 0x04, 0x67, 0x3c,
0x90, 0xd9, 0x02, 0x3f, 0x87, 0xfe, 0x22, 0x0a, 0x79, 0xae, 0x4c, 0x44, 0xd1, 0xf8, 0xee, 0xb1,
0x73, 0x79, 0x35, 0xec, 0xfc, 0xba, 0x1a, 0x1e, 0x34, 0x64, 0x65, 0xca, 0x93, 0xca, 0xd2, 0x8f,
0x12, 0x9e, 0xe5, 0x93, 0x50, 0x1e, 0xd6, 0x23, 0xb6, 0xab, 0x17, 0x76, 0xad, 0x80, 0x8f, 0xa0,
0xa7, 0x56, 0x29, 0x37, 0xb7, 0x28, 0x1a, 0x6f, 0x3b, 0xfb, 0x76, 0x23, 0xa6, 0xdd, 0xf0, 0x7c,
0xb3, 0x4a, 0x39, 0xd3, 0x24, 0x26, 0x70, 0xe7, 0x22, 0x4a, 0xce, 0x13, 0x3f, 0xe6, 0x66, 0xb7,
0xf2, 0x67, 0xff, 0xfb, 0xd1, 0x7b, 0xd8, 0x69, 0x0c, 0xbd, 0x8d, 0x94, 0x78, 0xed, 0x2b, 0x81,
0x31, 0xf4, 0x52, 0x5f, 0x89, 0x3a, 0x2e, 0xd3, 0x35, 0x3e, 0x82, 0x7e, 0xa6, 0x29, 0x6d, 0x3d,
0x70, 0xcc, 0x4d, 0xd6, 0xec, 0x9a, 0x1b, 0xcd, 0xe0, 0x5e, 0x63, 0x3b, 0xc7, 0x4f, 0xe0, 0x56,
0xa5, 0x94, 0x9b, 0x88, 0x76, 0xc7, 0x03, 0x87, 0x6e, 0x12, 0xf8, 0x17, 0x83, 0xd5, 0xf8, 0xc1,
0x0f, 0x04, 0xf7, 0x5b, 0x57, 0xc3, 0x0f, 0xa0, 0x37, 0xf3, 0x4e, 0xa7, 0x46, 0x87, 0xec, 0x15,
0x25, 0xdd, 0x69, 0x1d, 0xcf, 0xa2, 0x0b, 0x8e, 0x87, 0xd0, 0x75, 0x3d, 0x66, 0x20, 0xb2, 0x5b,
0x94, 0x14, 0xb7, 0x08, 0x37, 0xca, 0xf0, 0x23, 0x00, 0xd7, 0x63, 0x1f, 0x4e, 0xa6, 0x4f, 0xdd,
0x29, 0x33, 0xb6, 0xc8, 0x7e, 0x51, 0x52, 0xf3, 0x26, 0x77, 0xc2, 0xfd, 0x05, 0xcf, 0xf0, 0x43,
0xb8, 0x7d, 0xf6, 0xee, 0xe5, 0xa9, 0xf7, 0xea, 0x85, 0xd1, 0x25, 0xa4, 0x28, 0xe9, 0x6e, 0x0b,
0x3d, 0x5b, 0xc5, 0xd5, 0xbb, 0x92, 0xbd, 0x4f, 0x5f, 0xac, 0xce, 0xf7, 0xaf, 0x56, 0x3b, 0xf3,
0xb1, 0x71, 0xb9, 0xb6, 0xd0, 0xcf, 0xb5, 0x85, 0x7e, 0xaf, 0x2d, 0xf4, 0xf9, 0x8f, 0xd5, 0x99,
0xf7, 0xf5, 0x7f, 0x79, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf2, 0x2e, 0x06, 0x7d, 0x02,
0x00, 0x00,
}

View file

@@ -0,0 +1,30 @@
syntax = "proto3";
package contenthash;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
enum CacheRecordType {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "CacheRecordType";
FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"];
DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"];
DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"];
SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"];
}
message CacheRecord {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
CacheRecordType type = 2;
string linkname = 3;
}
message CacheRecordWithPath {
string path = 1;
CacheRecord record = 2;
}
message CacheRecords {
repeated CacheRecordWithPath paths = 1;
}
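A minimal round-trip sketch with the generated Go types (within the contenthash package):

recs := CacheRecords{
	Paths: []*CacheRecordWithPath{{
		Path:   "/dir",
		Record: &CacheRecord{Type: CacheRecordTypeDir},
	}},
}
dt, err := recs.Marshal()
if err != nil {
	return err
}
var out CacheRecords
if err := out.Unmarshal(dt); err != nil {
	return err
}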

View file

@@ -0,0 +1,98 @@
package contenthash
import (
"archive/tar"
"crypto/sha256"
"hash"
"os"
"path/filepath"
"time"
"github.com/tonistiigi/fsutil"
)
// NewFileHash returns new hash that is used for the builder cache keys
func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) {
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return nil, err
}
}
stat := &fsutil.Stat{
Mode: uint32(fi.Mode()),
Size_: fi.Size(),
ModTime: fi.ModTime().UnixNano(),
Linkname: link,
}
if fi.Mode()&os.ModeSymlink != 0 {
stat.Mode = stat.Mode | 0777
}
if err := setUnixOpt(path, fi, stat); err != nil {
return nil, err
}
return NewFromStat(stat)
}
func NewFromStat(stat *fsutil.Stat) (hash.Hash, error) {
fi := &statInfo{stat}
hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
if err != nil {
return nil, err
}
hdr.Name = "" // note: empty name is different from current has in docker build. Name is added on recursive directory scan instead
hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode)))
hdr.Devmajor = stat.Devmajor
hdr.Devminor = stat.Devminor
if len(stat.Xattrs) > 0 {
hdr.Xattrs = make(map[string]string, len(stat.Xattrs))
for k, v := range stat.Xattrs {
hdr.Xattrs[k] = string(v)
}
}
// fmt.Printf("hdr: %#v\n", hdr)
tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
tsh.Reset() // initialize header
return tsh, nil
}
type tarsumHash struct {
hash.Hash
hdr *tar.Header
}
// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
// comply with hash.Hash and reset to the state hash had before any writes
tsh.Hash.Reset()
WriteV1TarsumHeaders(tsh.hdr, tsh.Hash)
}
type statInfo struct {
*fsutil.Stat
}
func (s *statInfo) Name() string {
return filepath.Base(s.Stat.Path)
}
func (s *statInfo) Size() int64 {
return s.Stat.Size_
}
func (s *statInfo) Mode() os.FileMode {
return os.FileMode(s.Stat.Mode)
}
func (s *statInfo) ModTime() time.Time {
return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9)
}
func (s *statInfo) IsDir() bool {
return s.Mode().IsDir()
}
func (s *statInfo) Sys() interface{} {
return s.Stat
}
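A usage sketch mirroring prepareDigest in checksum.go: build the header hash for a path, stream regular-file contents into it, then wrap it in a digest (io.Copy stands in for the pooled copy helper):

fi, err := os.Lstat(fp)
if err != nil {
	return "", err
}
h, err := NewFileHash(fp, fi)
if err != nil {
	return "", err
}
if fi.Mode().IsRegular() {
	f, err := os.Open(fp)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
}
dgst := digest.NewDigest(digest.SHA256, h)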

View file

@@ -0,0 +1,47 @@
// +build !windows
package contenthash
import (
"os"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/tonistiigi/fsutil"
"golang.org/x/sys/unix"
)
func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
return perm
}
func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error {
s := fi.Sys().(*syscall.Stat_t)
stat.Uid = s.Uid
stat.Gid = s.Gid
if !fi.IsDir() {
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
stat.Devmajor = int64(unix.Major(uint64(s.Rdev)))
stat.Devminor = int64(unix.Minor(uint64(s.Rdev)))
}
}
attrs, err := sysx.LListxattr(path)
if err != nil {
return err
}
if len(attrs) > 0 {
stat.Xattrs = map[string][]byte{}
for _, attr := range attrs {
v, err := sysx.LGetxattr(path, attr)
if err == nil {
stat.Xattrs[attr] = v
}
}
}
return nil
}

View file

@@ -0,0 +1,23 @@
// +build windows
package contenthash
import (
"os"
"github.com/tonistiigi/fsutil"
)
// chmodWindowsTarEntry is used to adjust the file permissions used in tar
// header based on the platform the archival is done.
func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
func setUnixOpt(path string, fi os.FileInfo, stat *fsutil.Stat) error {
return nil
}

View file

@@ -0,0 +1,3 @@
package contenthash
//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. checksum.proto

View file

@@ -0,0 +1,60 @@
package contenthash
import (
"archive/tar"
"io"
"sort"
"strconv"
)
// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format.
func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) {
for _, elem := range v1TarHeaderSelect(h) {
w.Write([]byte(elem[0] + elem[1]))
}
}
// Functions below are from docker legacy tarsum implementation.
// There is no valid technical reason to continue using them.
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
return [][2]string{
{"name", h.Name},
{"mode", strconv.FormatInt(h.Mode, 10)},
{"uid", strconv.Itoa(h.Uid)},
{"gid", strconv.Itoa(h.Gid)},
{"size", strconv.FormatInt(h.Size, 10)},
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
{"typeflag", string([]byte{h.Typeflag})},
{"linkname", h.Linkname},
{"uname", h.Uname},
{"gname", h.Gname},
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
{"devminor", strconv.FormatInt(h.Devminor, 10)},
}
}
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
// Get extended attributes.
xAttrKeys := make([]string, len(h.Xattrs))
for k := range h.Xattrs {
xAttrKeys = append(xAttrKeys, k)
}
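// Note: xAttrKeys above is created with length (not just capacity)
// len(h.Xattrs), so it begins with that many empty strings before the real
// keys are appended; the quirk is kept as-is to stay byte-compatible with
// the legacy docker tarsum implementation these functions come from.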
sort.Strings(xAttrKeys)
// Make the slice with enough capacity to hold the 11 basic headers
// we want from the v0 selector plus however many xattrs we have.
orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
v0headers := v0TarHeaderSelect(h)
orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
orderedHeaders = append(orderedHeaders, v0headers[6:]...)
// Finally, append the sorted xattrs.
for _, k := range xAttrKeys {
orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
}
return
}

71
vendor/github.com/moby/buildkit/cache/fsutil.go generated vendored Normal file
View file

@@ -0,0 +1,71 @@
package cache
import (
"context"
"io"
"io/ioutil"
"os"
"github.com/containerd/continuity/fs"
"github.com/moby/buildkit/snapshot"
)
type ReadRequest struct {
Filename string
Range *FileRange
}
type FileRange struct {
Offset int
Length int
}
func ReadFile(ctx context.Context, ref ImmutableRef, req ReadRequest) ([]byte, error) {
mount, err := ref.Mount(ctx, true)
if err != nil {
return nil, err
}
lm := snapshot.LocalMounter(mount)
root, err := lm.Mount()
if err != nil {
return nil, err
}
defer func() {
if lm != nil {
lm.Unmount()
}
}()
fp, err := fs.RootPath(root, req.Filename)
if err != nil {
return nil, err
}
var dt []byte
if req.Range == nil {
dt, err = ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
} else {
f, err := os.Open(fp)
if err != nil {
return nil, err
}
dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
f.Close()
if err != nil {
return nil, err
}
}
if err := lm.Unmount(); err != nil {
return nil, err
}
lm = nil
return dt, err
}
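A usage sketch (the filename and range are hypothetical):

dt, err := cache.ReadFile(ctx, ref, cache.ReadRequest{
	Filename: "etc/hostname",
	Range:    &cache.FileRange{Offset: 0, Length: 128},
})
if err != nil {
	return err
}
fmt.Printf("%s", dt)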

27
vendor/github.com/moby/buildkit/cache/gc.go generated vendored Normal file
View file

@@ -0,0 +1,27 @@
package cache
import (
"context"
"errors"
"time"
)
// GCPolicy defines policy for garbage collection
type GCPolicy struct {
MaxSize uint64
MaxKeepDuration time.Duration
}
// // CachePolicy defines policy for keeping a resource in cache
// type CachePolicy struct {
// Priority int
// LastUsed time.Time
// }
//
// func defaultCachePolicy() CachePolicy {
// return CachePolicy{Priority: 10, LastUsed: time.Now()}
// }
func (cm *cacheManager) GC(ctx context.Context) error {
return errors.New("GC not implemented")
}

573
vendor/github.com/moby/buildkit/cache/manager.go generated vendored Normal file
View file

@@ -0,0 +1,573 @@
package cache
import (
"context"
"strings"
"sync"
"time"
"github.com/containerd/containerd/snapshots"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
var (
errLocked = errors.New("locked")
errNotFound = errors.New("not found")
errInvalid = errors.New("invalid")
)
type ManagerOpt struct {
Snapshotter snapshot.SnapshotterBase
GCPolicy GCPolicy
MetadataStore *metadata.Store
}
type Accessor interface {
Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error)
GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase?
}
type Controller interface {
DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error)
Prune(ctx context.Context, ch chan client.UsageInfo) error
GC(ctx context.Context) error
}
type Manager interface {
Accessor
Controller
Close() error
}
type cacheManager struct {
records map[string]*cacheRecord
mu sync.Mutex
ManagerOpt
md *metadata.Store
muPrune sync.Mutex // prevents parallel prunes, which could produce inconsistent results
}
func NewManager(opt ManagerOpt) (Manager, error) {
cm := &cacheManager{
ManagerOpt: opt,
md: opt.MetadataStore,
records: make(map[string]*cacheRecord),
}
if err := cm.init(context.TODO()); err != nil {
return nil, err
}
// cm.scheduleGC(5 * time.Minute)
return cm, nil
}
// init loads all snapshots from metadata state and tries to load the records
// from the snapshotter. If a snapshot can't be found, its metadata is deleted as well.
func (cm *cacheManager) init(ctx context.Context) error {
items, err := cm.md.All()
if err != nil {
return err
}
for _, si := range items {
if _, err := cm.getRecord(ctx, si.ID(), false); err != nil {
logrus.Debugf("could not load snapshot %s: %v", si.ID(), err)
cm.md.Clear(si.ID())
// TODO: make sure content is deleted as well
}
}
return nil
}
// Close closes the manager and releases the metadata database lock. No other
// method should be called after Close.
func (cm *cacheManager) Close() error {
// TODO: allocate internal context and cancel it here
return cm.md.Close()
}
// Get returns an immutable snapshot reference for ID
func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
return cm.get(ctx, id, false, opts...)
}
// GetFromSnapshotter returns an immutable snapshot reference for ID, falling
// back to the snapshotter when the metadata record is missing
func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
return cm.get(ctx, id, true, opts...)
}
// get requires manager lock to be taken
func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (ImmutableRef, error) {
rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...)
if err != nil {
return nil, err
}
rec.mu.Lock()
defer rec.mu.Unlock()
if rec.mutable {
if len(rec.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
if rec.equalImmutable != nil {
return rec.equalImmutable.ref(), nil
}
return rec.mref().commit(ctx)
}
return rec.ref(), nil
}
// getRecord returns record for id. Requires manager lock.
func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (cr *cacheRecord, retErr error) {
if rec, ok := cm.records[id]; ok {
if rec.isDead() {
return nil, errNotFound
}
return rec, nil
}
md, ok := cm.md.Get(id)
if !ok && !fromSnapshotter {
return nil, errNotFound
}
if mutableID := getEqualMutable(md); mutableID != "" {
mutable, err := cm.getRecord(ctx, mutableID, fromSnapshotter)
if err != nil {
// check loading mutable deleted record from disk
if errors.Cause(err) == errNotFound {
cm.md.Clear(id)
}
return nil, err
}
rec := &cacheRecord{
mu: &sync.Mutex{},
cm: cm,
refs: make(map[Mountable]struct{}),
parent: mutable.Parent(),
md: md,
equalMutable: &mutableRef{cacheRecord: mutable},
}
mutable.equalImmutable = &immutableRef{cacheRecord: rec}
cm.records[id] = rec
return rec, nil
}
info, err := cm.Snapshotter.Stat(ctx, id)
if err != nil {
return nil, errors.Wrap(errNotFound, err.Error())
}
var parent ImmutableRef
if info.Parent != "" {
parent, err = cm.get(ctx, info.Parent, fromSnapshotter, opts...)
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
parent.Release(context.TODO())
}
}()
}
rec := &cacheRecord{
mu: &sync.Mutex{},
mutable: info.Kind != snapshots.KindCommitted,
cm: cm,
refs: make(map[Mountable]struct{}),
parent: parent,
md: md,
}
// the record was deleted but we crashed before data on disk was removed
if getDeleted(md) {
if err := rec.remove(ctx, true); err != nil {
return nil, err
}
return nil, errNotFound
}
if err := initializeMetadata(rec, opts...); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, err
}
cm.records[id] = rec
return rec, nil
}
func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) {
id := identity.NewID()
var parent ImmutableRef
var parentID string
if s != nil {
var err error
parent, err = cm.Get(ctx, s.ID())
if err != nil {
return nil, err
}
if err := parent.Finalize(ctx); err != nil {
return nil, err
}
parentID = parent.ID()
}
if err := cm.Snapshotter.Prepare(ctx, id, parentID); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, errors.Wrapf(err, "failed to prepare %s", id)
}
md, _ := cm.md.Get(id)
rec := &cacheRecord{
mu: &sync.Mutex{},
mutable: true,
cm: cm,
refs: make(map[Mountable]struct{}),
parent: parent,
md: md,
}
if err := initializeMetadata(rec, opts...); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, err
}
cm.mu.Lock()
defer cm.mu.Unlock()
cm.records[id] = rec // TODO: save to db
return rec.mref(), nil
}
func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
rec, err := cm.getRecord(ctx, id, false)
if err != nil {
return nil, err
}
rec.mu.Lock()
defer rec.mu.Unlock()
if !rec.mutable {
return nil, errors.Wrapf(errInvalid, "%s is not mutable", id)
}
if len(rec.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
if rec.equalImmutable != nil {
if len(rec.equalImmutable.refs) != 0 {
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
delete(cm.records, rec.equalImmutable.ID())
if err := rec.equalImmutable.remove(ctx, false); err != nil {
return nil, err
}
rec.equalImmutable = nil
}
return rec.mref(), nil
}
func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo) error {
cm.muPrune.Lock()
defer cm.muPrune.Unlock()
return cm.prune(ctx, ch)
}
func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo) error {
var toDelete []*cacheRecord
cm.mu.Lock()
for _, cr := range cm.records {
cr.mu.Lock()
// ignore duplicates that share data
if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 {
cr.mu.Unlock()
continue
}
if cr.isDead() {
cr.mu.Unlock()
continue
}
if len(cr.refs) == 0 {
cr.dead = true
toDelete = append(toDelete, cr)
}
// mark metadata as deleted in case we crash before cleanup finished
if err := setDeleted(cr.md); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
cr.mu.Unlock()
}
cm.mu.Unlock()
if len(toDelete) == 0 {
return nil
}
var err error
for _, cr := range toDelete {
cr.mu.Lock()
usageCount, lastUsedAt := getLastUsed(cr.md)
c := client.UsageInfo{
ID: cr.ID(),
Mutable: cr.mutable,
InUse: len(cr.refs) > 0,
Size: getSize(cr.md),
CreatedAt: GetCreatedAt(cr.md),
Description: GetDescription(cr.md),
LastUsedAt: lastUsedAt,
UsageCount: usageCount,
}
if cr.parent != nil {
c.Parent = cr.parent.ID()
}
if c.Size == sizeUnknown {
cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead
s, err := cr.Size(ctx)
if err != nil {
return err
}
c.Size = s
cr.mu.Lock()
}
if cr.equalImmutable != nil {
if err1 := cr.equalImmutable.remove(ctx, false); err == nil {
err = err1
}
}
if err1 := cr.remove(ctx, true); err == nil {
err = err1
}
if err == nil && ch != nil {
ch <- c
}
cr.mu.Unlock()
}
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
return cm.prune(ctx, ch)
}
}
func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
cm.mu.Lock()
type cacheUsageInfo struct {
refs int
parent string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
}
m := make(map[string]*cacheUsageInfo, len(cm.records))
rescan := make(map[string]struct{}, len(cm.records))
for id, cr := range cm.records {
cr.mu.Lock()
// ignore duplicates that share data
if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 {
cr.mu.Unlock()
continue
}
usageCount, lastUsedAt := getLastUsed(cr.md)
c := &cacheUsageInfo{
refs: len(cr.refs),
mutable: cr.mutable,
size: getSize(cr.md),
createdAt: GetCreatedAt(cr.md),
usageCount: usageCount,
lastUsedAt: lastUsedAt,
description: GetDescription(cr.md),
doubleRef: cr.equalImmutable != nil,
}
if cr.parent != nil {
c.parent = cr.parent.ID()
}
if cr.mutable && c.refs > 0 {
c.size = 0 // size can not be determined because it is changing
}
m[id] = c
rescan[id] = struct{}{}
cr.mu.Unlock()
}
cm.mu.Unlock()
for {
if len(rescan) == 0 {
break
}
for id := range rescan {
v := m[id]
if v.refs == 0 && v.parent != "" {
m[v.parent].refs--
if v.doubleRef {
m[v.parent].refs--
}
rescan[v.parent] = struct{}{}
}
delete(rescan, id)
}
}
var du []*client.UsageInfo
for id, cr := range m {
if opt.Filter != "" && !strings.HasPrefix(id, opt.Filter) {
continue
}
c := &client.UsageInfo{
ID: id,
Mutable: cr.mutable,
InUse: cr.refs > 0,
Size: cr.size,
Parent: cr.parent,
CreatedAt: cr.createdAt,
Description: cr.description,
LastUsedAt: cr.lastUsedAt,
UsageCount: cr.usageCount,
}
du = append(du, c)
}
eg, ctx := errgroup.WithContext(ctx)
for _, d := range du {
if d.Size == sizeUnknown {
func(d *client.UsageInfo) {
eg.Go(func() error {
ref, err := cm.Get(ctx, d.ID)
if err != nil {
d.Size = 0
return nil
}
s, err := ref.Size(ctx)
if err != nil {
return err
}
d.Size = s
return ref.Release(context.TODO())
})
}(d)
}
}
if err := eg.Wait(); err != nil {
return du, err
}
return du, nil
}
func IsLocked(err error) bool {
return errors.Cause(err) == errLocked
}
func IsNotFound(err error) bool {
return errors.Cause(err) == errNotFound
}
type RefOption func(withMetadata) error
type cachePolicy int
const (
cachePolicyDefault cachePolicy = iota
cachePolicyRetain
)
type withMetadata interface {
Metadata() *metadata.StorageItem
}
func HasCachePolicyRetain(m withMetadata) bool {
return getCachePolicy(m.Metadata()) == cachePolicyRetain
}
func CachePolicyRetain(m withMetadata) error {
return queueCachePolicy(m.Metadata(), cachePolicyRetain)
}
func WithDescription(descr string) RefOption {
return func(m withMetadata) error {
return queueDescription(m.Metadata(), descr)
}
}
func WithCreationTime(tm time.Time) RefOption {
return func(m withMetadata) error {
return queueCreatedAt(m.Metadata(), tm)
}
}
func initializeMetadata(m withMetadata, opts ...RefOption) error {
md := m.Metadata()
if tm := GetCreatedAt(md); !tm.IsZero() {
return nil
}
if err := queueCreatedAt(md, time.Now()); err != nil {
return err
}
for _, opt := range opts {
if err := opt(m); err != nil {
return err
}
}
return md.Commit()
}
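
The IsLocked and IsNotFound helpers above are how callers are meant to branch on the package's sentinel errors. A minimal in-package sketch; the anonymous accessor interface is an assumption for illustration, satisfied by any value with the Get method shown earlier (such as the cache manager):

func describeGetError(ctx context.Context, acc interface {
	Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
}, id string) string {
	ref, err := acc.Get(ctx, id)
	switch {
	case err == nil:
		defer ref.Release(context.TODO()) // drop the ref once done
		return "available"
	case IsNotFound(err):
		return "record was pruned or never existed"
	case IsLocked(err):
		return "a mutable ref is still active; retry later"
	default:
		return err.Error()
	}
}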

vendor/github.com/moby/buildkit/cache/metadata.go generated vendored Normal file

@@ -0,0 +1,206 @@
package cache
import (
"time"
"github.com/boltdb/bolt"
"github.com/moby/buildkit/cache/metadata"
"github.com/pkg/errors"
)
const sizeUnknown int64 = -1
const keySize = "snapshot.size"
const keyEqualMutable = "cache.equalMutable"
const keyCachePolicy = "cache.cachePolicy"
const keyDescription = "cache.description"
const keyCreatedAt = "cache.createdAt"
const keyLastUsedAt = "cache.lastUsedAt"
const keyUsageCount = "cache.usageCount"
const keyDeleted = "cache.deleted"
func setDeleted(si *metadata.StorageItem) error {
v, err := metadata.NewValue(true)
if err != nil {
return errors.Wrap(err, "failed to create deleted value")
}
return si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, keyDeleted, v)
})
}
func getDeleted(si *metadata.StorageItem) bool {
v := si.Get(keyDeleted)
if v == nil {
return false
}
var deleted bool
if err := v.Unmarshal(&deleted); err != nil {
return false
}
return deleted
}
func setSize(si *metadata.StorageItem, s int64) error {
v, err := metadata.NewValue(s)
if err != nil {
return errors.Wrap(err, "failed to create size value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keySize, v)
})
return nil
}
func getSize(si *metadata.StorageItem) int64 {
v := si.Get(keySize)
if v == nil {
return sizeUnknown
}
var size int64
if err := v.Unmarshal(&size); err != nil {
return sizeUnknown
}
return size
}
func getEqualMutable(si *metadata.StorageItem) string {
v := si.Get(keyEqualMutable)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return str
}
func setEqualMutable(si *metadata.StorageItem, s string) error {
v, err := metadata.NewValue(s)
if err != nil {
return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable)
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyEqualMutable, v)
})
return nil
}
func clearEqualMutable(si *metadata.StorageItem) error {
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyEqualMutable, nil)
})
return nil
}
func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error {
v, err := metadata.NewValue(p)
if err != nil {
return errors.Wrap(err, "failed to create cachePolicy value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyCachePolicy, v)
})
return nil
}
func getCachePolicy(si *metadata.StorageItem) cachePolicy {
v := si.Get(keyCachePolicy)
if v == nil {
return cachePolicyDefault
}
var p cachePolicy
if err := v.Unmarshal(&p); err != nil {
return cachePolicyDefault
}
return p
}
func queueDescription(si *metadata.StorageItem, descr string) error {
v, err := metadata.NewValue(descr)
if err != nil {
return errors.Wrap(err, "failed to create description value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyDescription, v)
})
return nil
}
func GetDescription(si *metadata.StorageItem) string {
v := si.Get(keyDescription)
if v == nil {
return ""
}
var str string
if err := v.Unmarshal(&str); err != nil {
return ""
}
return str
}
func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error {
v, err := metadata.NewValue(tm.UnixNano())
if err != nil {
return errors.Wrap(err, "failed to create createdAt value")
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, keyCreatedAt, v)
})
return nil
}
func GetCreatedAt(si *metadata.StorageItem) time.Time {
v := si.Get(keyCreatedAt)
if v == nil {
return time.Time{}
}
var tm int64
if err := v.Unmarshal(&tm); err != nil {
return time.Time{}
}
return time.Unix(tm/1e9, tm%1e9)
}
func getLastUsed(si *metadata.StorageItem) (int, *time.Time) {
v := si.Get(keyUsageCount)
if v == nil {
return 0, nil
}
var usageCount int
if err := v.Unmarshal(&usageCount); err != nil {
return 0, nil
}
v = si.Get(keyLastUsedAt)
if v == nil {
return usageCount, nil
}
var lastUsedTs int64
if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 {
return usageCount, nil
}
tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9)
return usageCount, &tm
}
func updateLastUsed(si *metadata.StorageItem) error {
count, _ := getLastUsed(si)
count++
v, err := metadata.NewValue(count)
if err != nil {
return errors.Wrap(err, "failed to create usageCount value")
}
v2, err := metadata.NewValue(time.Now().UnixNano())
if err != nil {
return errors.Wrap(err, "failed to create lastUsedAt value")
}
return si.Update(func(b *bolt.Bucket) error {
if err := si.SetValue(b, keyUsageCount, v); err != nil {
return err
}
return si.SetValue(b, keyLastUsedAt, v2)
})
}

@@ -0,0 +1,382 @@
package metadata
import (
"bytes"
"encoding/json"
"strings"
"sync"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
mainBucket = "_main"
indexBucket = "_index"
externalBucket = "_external"
)
var errNotFound = errors.Errorf("not found")
type Store struct {
db *bolt.DB
}
func NewStore(dbPath string) (*Store, error) {
db, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
}
return &Store{db: db}, nil
}
func (s *Store) DB() *bolt.DB {
return s.db
}
func (s *Store) All() ([]*StorageItem, error) {
var out []*StorageItem
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return nil
}
return b.ForEach(func(key, _ []byte) error {
b := b.Bucket(key)
if b == nil {
return nil
}
si, err := newStorageItem(string(key), b, s)
if err != nil {
return err
}
out = append(out, si)
return nil
})
})
return out, err
}
func (s *Store) Probe(index string) (bool, error) {
var exists bool
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(indexBucket))
if b == nil {
return nil
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
search := []byte(indexKey(index, ""))
c := b.Cursor()
k, _ := c.Seek(search)
if k != nil && bytes.HasPrefix(k, search) {
exists = true
}
return nil
})
return exists, err
}
func (s *Store) Search(index string) ([]*StorageItem, error) {
var out []*StorageItem
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(indexBucket))
if b == nil {
return nil
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
index = indexKey(index, "")
c := b.Cursor()
k, _ := c.Seek([]byte(index))
for {
if k != nil && strings.HasPrefix(string(k), index) {
itemID := strings.TrimPrefix(string(k), index)
k, _ = c.Next()
b := main.Bucket([]byte(itemID))
if b == nil {
logrus.Errorf("index pointing to missing record %s", itemID)
continue
}
si, err := newStorageItem(itemID, b, s)
if err != nil {
return err
}
out = append(out, si)
} else {
break
}
}
return nil
})
return out, err
}
func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error {
return s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return errors.WithStack(errNotFound)
}
b = b.Bucket([]byte(id))
if b == nil {
return errors.WithStack(errNotFound)
}
return fn(b)
})
}
func (s *Store) Clear(id string) error {
return s.db.Update(func(tx *bolt.Tx) error {
external := tx.Bucket([]byte(externalBucket))
if external != nil {
external.DeleteBucket([]byte(id))
}
main := tx.Bucket([]byte(mainBucket))
if main == nil {
return nil
}
b := main.Bucket([]byte(id))
if b == nil {
return nil
}
si, err := newStorageItem(id, b, s)
if err != nil {
return err
}
if indexes := si.Indexes(); len(indexes) > 0 {
b := tx.Bucket([]byte(indexBucket))
if b != nil {
for _, index := range indexes {
if err := b.Delete([]byte(indexKey(index, id))); err != nil {
return err
}
}
}
}
return main.DeleteBucket([]byte(id))
})
}
func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error {
return s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(mainBucket))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
}
return fn(b)
})
}
func (s *Store) Get(id string) (*StorageItem, bool) {
empty := func() *StorageItem {
si, _ := newStorageItem(id, nil, s)
return si
}
tx, err := s.db.Begin(false)
if err != nil {
return empty(), false
}
defer tx.Rollback()
b := tx.Bucket([]byte(mainBucket))
if b == nil {
return empty(), false
}
b = b.Bucket([]byte(id))
if b == nil {
return empty(), false
}
si, _ := newStorageItem(id, b, s)
return si, true
}
func (s *Store) Close() error {
return s.db.Close()
}
type StorageItem struct {
id string
values map[string]*Value
queue []func(*bolt.Bucket) error
storage *Store
mu sync.RWMutex
}
func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) {
si := &StorageItem{
id: id,
storage: s,
values: make(map[string]*Value),
}
if b != nil {
if err := b.ForEach(func(k, v []byte) error {
var sv Value
if len(v) > 0 {
if err := json.Unmarshal(v, &sv); err != nil {
return err
}
si.values[string(k)] = &sv
}
return nil
}); err != nil {
return si, err
}
}
return si, nil
}
func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this?
return s.storage
}
func (s *StorageItem) ID() string {
return s.id
}
func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error {
return s.storage.View(s.id, fn)
}
func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error {
return s.storage.Update(s.id, fn)
}
func (s *StorageItem) Keys() []string {
keys := make([]string, 0, len(s.values))
for k := range s.values {
keys = append(keys, k)
}
return keys
}
func (s *StorageItem) Get(k string) *Value {
s.mu.RLock()
v := s.values[k]
s.mu.RUnlock()
return v
}
func (s *StorageItem) GetExternal(k string) ([]byte, error) {
var dt []byte
err := s.storage.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(externalBucket))
if b == nil {
return errors.WithStack(errNotFound)
}
b = b.Bucket([]byte(s.id))
if b == nil {
return errors.WithStack(errNotFound)
}
dt = b.Get([]byte(k))
if dt == nil {
return errors.WithStack(errNotFound)
}
return nil
})
if err != nil {
return nil, err
}
return dt, nil
}
func (s *StorageItem) SetExternal(k string, dt []byte) error {
return s.storage.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(externalBucket))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(s.id))
if err != nil {
return err
}
return b.Put([]byte(k), dt)
})
}
func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
s.mu.Lock()
defer s.mu.Unlock()
s.queue = append(s.queue, fn)
}
func (s *StorageItem) Commit() error {
s.mu.Lock()
defer s.mu.Unlock()
return s.Update(func(b *bolt.Bucket) error {
for _, fn := range s.queue {
if err := fn(b); err != nil {
return err
}
}
s.queue = s.queue[:0]
return nil
})
}
func (s *StorageItem) Indexes() (out []string) {
for _, v := range s.values {
if v.Index != "" {
out = append(out, v.Index)
}
}
return
}
func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
if v == nil {
if err := b.Put([]byte(key), nil); err != nil {
return err
}
delete(s.values, key)
return nil
}
dt, err := json.Marshal(v)
if err != nil {
return err
}
if err := b.Put([]byte(key), dt); err != nil {
return err
}
if v.Index != "" {
b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
if err != nil {
return err
}
if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil {
return err
}
}
s.values[key] = v
return nil
}
type Value struct {
Value json.RawMessage `json:"value,omitempty"`
Index string `json:"index,omitempty"`
}
func NewValue(v interface{}) (*Value, error) {
dt, err := json.Marshal(v)
if err != nil {
return nil, err
}
return &Value{Value: json.RawMessage(dt)}, nil
}
func (v *Value) Unmarshal(target interface{}) error {
err := json.Unmarshal(v.Value, target)
return err
}
func indexKey(index, target string) string {
return index + "::" + target
}
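
A minimal round-trip sketch for the Store API above, assuming only a writable scratch path for the bolt database; Queue batches writes that Commit then flushes in a single update transaction:

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
	"github.com/moby/buildkit/cache/metadata"
)

func main() {
	store, err := metadata.NewStore("/tmp/metadata.db") // assumed scratch path
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	si, _ := store.Get("item1") // returns an empty item if nothing is stored yet
	v, err := metadata.NewValue("hello")
	if err != nil {
		log.Fatal(err)
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, "greeting", v)
	})
	if err := si.Commit(); err != nil { // flushes the queued writes
		log.Fatal(err)
	}

	var s string
	if err := si.Get("greeting").Unmarshal(&s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // "hello"
}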

vendor/github.com/moby/buildkit/cache/refs.go generated vendored Normal file

@@ -0,0 +1,380 @@
package cache
import (
"context"
"sync"
"github.com/containerd/containerd/mount"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Ref is a reference to cacheable objects.
type Ref interface {
Mountable
ID() string
Release(context.Context) error
Size(ctx context.Context) (int64, error)
Metadata() *metadata.StorageItem
}
type ImmutableRef interface {
Ref
Parent() ImmutableRef
Finalize(ctx context.Context) error // Make sure reference is flushed to driver
Clone() ImmutableRef
}
type MutableRef interface {
Ref
Commit(context.Context) (ImmutableRef, error)
}
type Mountable interface {
Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error)
}
type cacheRecord struct {
cm *cacheManager
mu *sync.Mutex // the mutex is shared by records sharing data
mutable bool
refs map[Mountable]struct{}
parent ImmutableRef
md *metadata.StorageItem
// dead means record is marked as deleted
dead bool
view string
viewMount snapshot.Mountable
sizeG flightcontrol.Group
// these are filled if multiple refs point to the same data
equalMutable *mutableRef
equalImmutable *immutableRef
}
// hold ref lock before calling
func (cr *cacheRecord) ref() *immutableRef {
ref := &immutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
// hold ref lock before calling
func (cr *cacheRecord) mref() *mutableRef {
ref := &mutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
// hold ref lock before calling
func (cr *cacheRecord) isDead() bool {
return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead)
}
func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
// this expects that usage() is implemented lazily
s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
cr.mu.Lock()
s := getSize(cr.md)
if s != sizeUnknown {
cr.mu.Unlock()
return s, nil
}
driverID := cr.ID()
if cr.equalMutable != nil {
driverID = cr.equalMutable.ID()
}
cr.mu.Unlock()
usage, err := cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID)
if err != nil {
cr.mu.Lock()
isDead := cr.isDead()
cr.mu.Unlock()
if isDead {
return int64(0), nil
}
return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID())
}
cr.mu.Lock()
setSize(cr.md, usage.Size)
if err := cr.md.Commit(); err != nil {
cr.mu.Unlock()
return s, err
}
cr.mu.Unlock()
return usage.Size, nil
})
return s.(int64), err
}
func (cr *cacheRecord) Parent() ImmutableRef {
if cr.parent == nil {
return nil
}
p := cr.parent.(*immutableRef)
p.mu.Lock()
defer p.mu.Unlock()
return p.ref()
}
func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.mutable {
m, err := cr.cm.Snapshotter.Mounts(ctx, cr.ID())
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
}
if readonly {
m = setReadonly(m)
}
return m, nil
}
if cr.equalMutable != nil && readonly {
m, err := cr.cm.Snapshotter.Mounts(ctx, cr.equalMutable.ID())
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID())
}
return setReadonly(m), nil
}
if err := cr.finalize(ctx); err != nil {
return nil, err
}
if cr.viewMount == nil { // TODO: handle this better
cr.view = identity.NewID()
m, err := cr.cm.Snapshotter.View(ctx, cr.view, cr.ID())
if err != nil {
cr.view = ""
return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
}
cr.viewMount = m
}
return cr.viewMount, nil
}
// call when holding the manager lock
func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
delete(cr.cm.records, cr.ID())
if cr.parent != nil {
if err := cr.parent.(*immutableRef).release(ctx); err != nil {
return err
}
}
if removeSnapshot {
if err := cr.cm.Snapshotter.Remove(ctx, cr.ID()); err != nil {
return err
}
}
if err := cr.cm.md.Clear(cr.ID()); err != nil {
return err
}
return nil
}
func (cr *cacheRecord) ID() string {
return cr.md.ID()
}
type immutableRef struct {
*cacheRecord
}
type mutableRef struct {
*cacheRecord
}
func (sr *immutableRef) Clone() ImmutableRef {
sr.mu.Lock()
ref := sr.ref()
sr.mu.Unlock()
return ref
}
func (sr *immutableRef) Release(ctx context.Context) error {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.release(ctx)
}
func (sr *immutableRef) release(ctx context.Context) error {
delete(sr.refs, sr)
if len(sr.refs) == 0 {
updateLastUsed(sr.md)
if sr.viewMount != nil { // TODO: release viewMount earlier if possible
if err := sr.cm.Snapshotter.Remove(ctx, sr.view); err != nil {
return err
}
sr.view = ""
sr.viewMount = nil
}
if sr.equalMutable != nil {
sr.equalMutable.release(ctx)
}
// go sr.cm.GC()
}
return nil
}
func (sr *immutableRef) Finalize(ctx context.Context) error {
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.finalize(ctx)
}
func (cr *cacheRecord) Metadata() *metadata.StorageItem {
return cr.md
}
func (cr *cacheRecord) finalize(ctx context.Context) error {
mutable := cr.equalMutable
if mutable == nil {
return nil
}
err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
if err != nil {
return errors.Wrapf(err, "failed to commit %s", mutable.ID())
}
mutable.dead = true
go func() {
cr.cm.mu.Lock()
defer cr.cm.mu.Unlock()
if err := mutable.remove(context.TODO(), false); err != nil {
logrus.Error(err)
}
}()
cr.equalMutable = nil
clearEqualMutable(cr.md)
return cr.md.Commit()
}
func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) {
if !sr.mutable || len(sr.refs) == 0 {
return nil, errors.Wrapf(errInvalid, "invalid mutable ref")
}
id := identity.NewID()
md, _ := sr.cm.md.Get(id)
rec := &cacheRecord{
mu: sr.mu,
cm: sr.cm,
parent: sr.Parent(),
equalMutable: sr,
refs: make(map[Mountable]struct{}),
md: md,
}
if descr := GetDescription(sr.md); descr != "" {
if err := queueDescription(md, descr); err != nil {
return nil, err
}
}
if err := initializeMetadata(rec); err != nil {
return nil, err
}
sr.cm.records[id] = rec
if err := sr.md.Commit(); err != nil {
return nil, err
}
setSize(md, sizeUnknown)
setEqualMutable(md, sr.ID())
if err := md.Commit(); err != nil {
return nil, err
}
ref := rec.ref()
sr.equalImmutable = ref
return ref, nil
}
func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.commit(ctx)
}
func (sr *mutableRef) Release(ctx context.Context) error {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
sr.mu.Lock()
defer sr.mu.Unlock()
return sr.release(ctx)
}
func (sr *mutableRef) release(ctx context.Context) error {
delete(sr.refs, sr)
if getCachePolicy(sr.md) != cachePolicyRetain {
if sr.equalImmutable != nil {
if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain {
return nil
}
if err := sr.equalImmutable.remove(ctx, false); err != nil {
return err
}
}
if sr.parent != nil {
if err := sr.parent.(*immutableRef).release(ctx); err != nil {
return err
}
}
return sr.remove(ctx, true)
} else {
updateLastUsed(sr.md)
}
return nil
}
func setReadonly(mounts snapshot.Mountable) snapshot.Mountable {
return &readOnlyMounter{mounts}
}
type readOnlyMounter struct {
snapshot.Mountable
}
func (m *readOnlyMounter) Mount() ([]mount.Mount, error) {
mounts, err := m.Mountable.Mount()
if err != nil {
return nil, err
}
for i, m := range mounts {
opts := make([]string, 0, len(m.Options))
for _, opt := range m.Options {
if opt != "rw" {
opts = append(opts, opt)
}
}
opts = append(opts, "ro")
mounts[i].Options = opts
}
return mounts, nil
}
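
A short lifecycle sketch for the interfaces above (ref is assumed to come from a manager Get call): mount read-only, resolve the concrete mounts, query the lazily computed size, then release:

func inspectRef(ctx context.Context, ref ImmutableRef) error {
	mountable, err := ref.Mount(ctx, true) // read-only view of the snapshot
	if err != nil {
		return err
	}
	if _, err := mountable.Mount(); err != nil { // resolve the []mount.Mount
		return err
	}
	size, err := ref.Size(ctx) // computed lazily, then cached in metadata
	if err != nil {
		return err
	}
	logrus.Debugf("ref %s is %d bytes", ref.ID(), size)
	return ref.Release(context.TODO())
}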

@@ -0,0 +1,141 @@
package remotecache
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/docker/distribution/manifest"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/push"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ExporterOpt struct {
SessionManager *session.Manager
}
func NewCacheExporter(opt ExporterOpt) *CacheExporter {
return &CacheExporter{opt: opt}
}
type CacheExporter struct {
opt ExporterOpt
}
func (ce *CacheExporter) ExporterForTarget(target string) *RegistryCacheExporter {
cc := v1.NewCacheChains()
return &RegistryCacheExporter{target: target, CacheExporterTarget: cc, chains: cc, exporter: ce}
}
func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, target string) error {
config, descs, err := cc.Marshal()
if err != nil {
return err
}
// use our own type because the OCI type can't be pushed and the Docker type doesn't support annotations
type manifestList struct {
manifest.Versioned
// Manifests references platform specific manifests.
Manifests []ocispec.Descriptor `json:"manifests"`
}
var mfst manifestList
mfst.SchemaVersion = 2
mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
allBlobs := map[digest.Digest]struct{}{}
mp := contentutil.NewMultiProvider(nil)
for _, l := range config.Layers {
if _, ok := allBlobs[l.Blob]; ok {
continue
}
dgstPair, ok := descs[l.Blob]
if !ok {
return errors.Errorf("missing blob %s", l.Blob)
}
allBlobs[l.Blob] = struct{}{}
mp.Add(l.Blob, dgstPair.Provider)
mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
}
dt, err := json.Marshal(config)
if err != nil {
return err
}
dgst := digest.FromBytes(dt)
desc := ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
MediaType: v1.CacheConfigMediaTypeV0,
}
configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
buf := contentutil.NewBuffer()
if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return configDone(errors.Wrap(err, "error writing config blob"))
}
configDone(nil)
mp.Add(dgst, buf)
mfst.Manifests = append(mfst.Manifests, desc)
dt, err = json.Marshal(mfst)
if err != nil {
return errors.Wrap(err, "failed to marshal manifest")
}
dgst = digest.FromBytes(dt)
buf = contentutil.NewBuffer()
desc = ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
}
mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return mfstDone(errors.Wrap(err, "error writing manifest blob"))
}
mfstDone(nil)
mp.Add(dgst, buf)
return push.Push(ctx, ce.opt.SessionManager, mp, dgst, target, false)
}
type RegistryCacheExporter struct {
solver.CacheExporterTarget
chains *v1.CacheChains
target string
exporter *CacheExporter
}
func (ce *RegistryCacheExporter) Finalize(ctx context.Context) error {
return ce.exporter.Finalize(ctx, ce.chains, ce.target)
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}

@@ -0,0 +1,124 @@
package remotecache
import (
"context"
"encoding/json"
"net/http"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/worker"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ImportOpt struct {
SessionManager *session.Manager
Worker worker.Worker // TODO: remove. This sets the worker where the cache is imported to. Should be passed on load instead.
}
func NewCacheImporter(opt ImportOpt) *CacheImporter {
return &CacheImporter{opt: opt}
}
type CacheImporter struct {
opt ImportOpt
}
func (ci *CacheImporter) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
id := session.FromContext(ctx)
if id == "" {
return nil
}
return func(host string) (string, string, error) {
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
caller, err := ci.opt.SessionManager.Get(timeoutCtx, id)
if err != nil {
return "", "", err
}
return auth.CredentialsFunc(context.TODO(), caller)(host)
}
}
func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheManager, error) {
resolver := docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
Credentials: ci.getCredentialsFromSession(ctx),
})
ref, desc, err := resolver.Resolve(ctx, ref)
if err != nil {
return nil, err
}
fetcher, err := resolver.Fetcher(ctx, ref)
if err != nil {
return nil, err
}
b := contentutil.NewBuffer()
if _, err := remotes.FetchHandler(b, fetcher)(ctx, desc); err != nil {
return nil, err
}
dt, err := content.ReadBlob(ctx, b, desc)
if err != nil {
return nil, err
}
var mfst ocispec.Index
if err := json.Unmarshal(dt, &mfst); err != nil {
return nil, err
}
allLayers := v1.DescriptorProvider{}
var configDesc ocispec.Descriptor
for _, m := range mfst.Manifests {
if m.MediaType == v1.CacheConfigMediaTypeV0 {
configDesc = m
continue
}
allLayers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m,
Provider: contentutil.FromFetcher(fetcher, m),
}
}
if configDesc.Digest == "" {
return nil, errors.Errorf("invalid build cache from %s", ref)
}
if _, err := remotes.FetchHandler(b, fetcher)(ctx, configDesc); err != nil {
return nil, err
}
dt, err = content.ReadBlob(ctx, b, configDesc)
if err != nil {
return nil, err
}
cc := v1.NewCacheChains()
if err := v1.Parse(dt, allLayers, cc); err != nil {
return nil, err
}
keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, ci.opt.Worker)
if err != nil {
return nil, err
}
return solver.NewCacheManager(ref, keysStorage, resultStorage), nil
}

@@ -0,0 +1,247 @@
package cacheimport
import (
"context"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) {
storage := &cacheKeyStorage{
byID: map[string]*itemWithOutgoingLinks{},
byItem: map[*item]string{},
byResult: map[string]map[string]struct{}{},
}
for _, it := range cc.items {
if _, err := addItemToStorage(storage, it); err != nil {
return nil, nil, err
}
}
results := &cacheResultStorage{
w: w,
byID: storage.byID,
byResult: storage.byResult,
}
return storage, results, nil
}
func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) {
if id, ok := k.byItem[it]; ok {
if id == "" {
return nil, errors.Errorf("invalid loop")
}
return k.byID[id], nil
}
var id string
if len(it.links) == 0 {
id = it.dgst.String()
} else {
id = identity.NewID()
}
k.byItem[it] = ""
for i, m := range it.links {
for l := range m {
src, err := addItemToStorage(k, l.src)
if err != nil {
return nil, err
}
cl := nlink{
input: i,
dgst: it.dgst,
selector: l.selector,
}
src.links[cl] = append(src.links[cl], id)
}
}
k.byItem[it] = id
itl := &itemWithOutgoingLinks{
item: it,
links: map[nlink][]string{},
}
k.byID[id] = itl
if res := it.result; res != nil {
resultID := remoteID(res)
ids, ok := k.byResult[resultID]
if !ok {
ids = map[string]struct{}{}
k.byResult[resultID] = ids
}
ids[id] = struct{}{}
}
return itl, nil
}
type cacheKeyStorage struct {
byID map[string]*itemWithOutgoingLinks
byItem map[*item]string
byResult map[string]map[string]struct{}
}
type itemWithOutgoingLinks struct {
*item
links map[nlink][]string
}
func (cs *cacheKeyStorage) Exists(id string) bool {
_, ok := cs.byID[id]
return ok
}
func (cs *cacheKeyStorage) Walk(func(id string) error) error {
return nil
}
func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
if res := it.result; res != nil {
return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime})
}
return nil
}
func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) {
it, ok := cs.byID[id]
if !ok {
return solver.CacheResult{}, nil
}
if res := it.result; res != nil {
return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil
}
return solver.CacheResult{}, nil
}
func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error {
return nil
}
func (cs *cacheKeyStorage) Release(resultID string) error {
return nil
}
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
return nil
}
func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
for _, id := range it.links[nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}] {
if err := fn(id); err != nil {
return err
}
}
return nil
}
// TODO:
func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
return nil
}
func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error {
ids := cs.byResult[id]
for id := range ids {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool {
l := nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}
if it, ok := cs.byID[id]; ok {
for _, id := range it.links[l] {
if id == target {
return true
}
}
}
return false
}
type cacheResultStorage struct {
w worker.Worker
byID map[string]*itemWithOutgoingLinks
byResult map[string]map[string]struct{}
}
func (cs *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) {
return solver.CacheResult{}, errors.Errorf("importer is immutable")
}
func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
remote, err := cs.LoadRemote(ctx, res)
if err != nil {
return nil, err
}
ref, err := cs.w.FromRemote(ctx, remote)
if err != nil {
return nil, err
}
return worker.NewWorkerRefResult(ref, cs.w), nil
}
func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
if r := cs.byResultID(res.ID); r != nil {
return r, nil
}
return nil, errors.WithStack(solver.ErrNotFound)
}
func (cs *cacheResultStorage) Exists(id string) bool {
return cs.byResultID(id) != nil
}
func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote {
m, ok := cs.byResult[resultID]
if !ok || len(m) == 0 {
return nil
}
for id := range m {
it, ok := cs.byID[id]
if ok {
if r := it.result; r != nil {
return r
}
}
}
return nil
}
// unique ID per remote. this ID is not stable.
func remoteID(r *solver.Remote) string {
dgstr := digest.Canonical.Digester()
for _, desc := range r.Descriptors {
dgstr.Hash().Write([]byte(desc.Digest))
}
return dgstr.Digest().String()
}

@@ -0,0 +1,127 @@
package cacheimport
import (
"time"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func NewCacheChains() *CacheChains {
return &CacheChains{visited: map[interface{}]struct{}{}}
}
type CacheChains struct {
items []*item
visited map[interface{}]struct{}
}
func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
it := &item{c: c, dgst: dgst}
c.items = append(c.items, it)
return it
}
func (c *CacheChains) Visit(v interface{}) {
c.visited[v] = struct{}{}
}
func (c *CacheChains) Visited(v interface{}) bool {
_, ok := c.visited[v]
return ok
}
func (c *CacheChains) normalize() error {
st := &normalizeState{
added: map[*item]*item{},
links: map[*item]map[nlink]map[digest.Digest]struct{}{},
byKey: map[digest.Digest]*item{},
}
for _, it := range c.items {
_, err := normalizeItem(it, st)
if err != nil {
return err
}
}
items := make([]*item, 0, len(st.byKey))
for _, it := range st.byKey {
items = append(items, it)
}
c.items = items
return nil
}
func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) {
if err := c.normalize(); err != nil {
return nil, nil, err
}
st := &marshalState{
chainsByID: map[string]int{},
descriptors: DescriptorProvider{},
recordsByItem: map[*item]int{},
}
for _, it := range c.items {
if err := marshalItem(it, st); err != nil {
return nil, nil, err
}
}
cc := CacheConfig{
Layers: st.layers,
Records: st.records,
}
sortConfig(&cc)
return &cc, st.descriptors, nil
}
type DescriptorProvider map[digest.Digest]DescriptorProviderPair
type DescriptorProviderPair struct {
Descriptor ocispec.Descriptor
Provider content.Provider
}
type item struct {
c *CacheChains
dgst digest.Digest
result *solver.Remote
resultTime time.Time
links []map[link]struct{}
}
type link struct {
src *item
selector string
}
func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
c.resultTime = createdAt
c.result = result
}
func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
src, ok := rec.(*item)
if !ok {
return
}
for index >= len(c.links) {
c.links = append(c.links, map[link]struct{}{})
}
c.links[index][link{src: src, selector: selector}] = struct{}{}
}
var _ solver.CacheExporterTarget = &CacheChains{}
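
A minimal sketch of building and marshaling a two-step chain with the API above; the digests are hypothetical placeholders:

func buildExampleChain() (*CacheConfig, DescriptorProvider, error) {
	cc := NewCacheChains()
	base := cc.Add(digest.FromString("base-step"))
	child := cc.Add(digest.FromString("child-step"))
	child.LinkFrom(base, 0, "") // child depends on base at input index 0
	return cc.Marshal()         // deduplicated and sorted deterministically
}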

@@ -0,0 +1,50 @@
package cacheimport
// Distributable build cache
//
// Main manifest is OCI image index
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
// Manifests array contains descriptors to the cache layers and one instance of
// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
// The cache layer descriptors need to have an annotation with the uncompressed
// digest to allow deduplication on extraction and optionally a
// "buildkit/createdat" annotation to support maintaining original timestamps.
//
// Cache config file layout:
//
//{
// "layers": [
// {
// "blob": "sha256:deadbeef", <- digest of layer blob in index
// "parent": -1 <- index of parent layer, -1 if no parent
// },
// {
// "blob": "sha256:deadbeef",
// "parent": 0
// }
// ],
//
// "records": [
// {
// "digest": "sha256:deadbeef", <- base digest for the record
// },
// {
// "digest": "sha256:deadbeef",
// "output": 1, <- optional output index
// "layers": [ <- optional array or layer chains
// {
// "createdat": "",
// "layer": 1, <- index to the layer
// }
// ],
// "inputs": [ <- dependant records
// [ <- index of the dependency (0)
// {
// "selector": "sel", <- optional selector
// "link": 0, <- index to the dependant record
// }
// ]
// ]
// }
// ]
// }
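
A small sketch exercising this layout against the CacheConfig types defined further below in this package (encoding/json is assumed to be imported; the digests stay placeholders):

func parseLayoutExample() (*CacheConfig, error) {
	raw := []byte(`{
	"layers": [
		{"blob": "sha256:deadbeef", "parent": -1}
	],
	"records": [
		{"digest": "sha256:deadbeef"},
		{"digest": "sha256:deadbeef", "inputs": [[{"selector": "sel", "link": 0}]]}
	]
}`)
	var config CacheConfig
	if err := json.Unmarshal(raw, &config); err != nil {
		return nil, err
	}
	return &config, nil
}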

@@ -0,0 +1,102 @@
package cacheimport
import (
"encoding/json"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
var config CacheConfig
if err := json.Unmarshal(configJSON, &config); err != nil {
return err
}
cache := map[int]solver.CacheExporterRecord{}
for i := range config.Records {
if _, err := parseRecord(config, i, provider, t, cache); err != nil {
return err
}
}
return nil
}
func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) {
if r, ok := cache[idx]; ok {
if r == nil {
return nil, errors.Errorf("invalid looping record")
}
return r, nil
}
if idx < 0 || idx >= len(cc.Records) {
return nil, errors.Errorf("invalid record ID: %d", idx)
}
rec := cc.Records[idx]
r := t.Add(rec.Digest)
cache[idx] = nil
for i, inputs := range rec.Inputs {
for _, inp := range inputs {
src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache)
if err != nil {
return nil, err
}
r.LinkFrom(src, i, inp.Selector)
}
}
for _, res := range rec.Results {
visited := map[int]struct{}{}
remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited)
if err != nil {
return nil, err
}
r.AddResult(res.CreatedAt, remote)
}
cache[idx] = r
return r, nil
}
func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) {
if _, ok := visited[idx]; ok {
return nil, errors.Errorf("invalid looping layer")
}
visited[idx] = struct{}{}
if idx < 0 || idx >= len(layers) {
return nil, errors.Errorf("invalid layer index %d", idx)
}
l := layers[idx]
descPair, ok := provider[l.Blob]
if !ok {
return nil, errors.Errorf("missing blob for %s", l.Blob)
}
var r *solver.Remote
if l.ParentIndex != -1 {
var err error
r, err = getRemoteChain(layers, l.ParentIndex, provider, visited)
if err != nil {
return nil, err
}
r.Descriptors = append(r.Descriptors, descPair.Descriptor)
mp := contentutil.NewMultiProvider(r.Provider)
mp.Add(descPair.Descriptor.Digest, descPair.Provider)
r.Provider = mp
return r, nil
}
return &solver.Remote{
Descriptors: []ocispec.Descriptor{descPair.Descriptor},
Provider: descPair.Provider,
}, nil
}
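
And the inverse direction: a hedged sketch reloading a marshaled config into a fresh CacheChains (how the provider map is populated is elided here; any DescriptorProvider works):

func reloadChains(configJSON []byte, provider DescriptorProvider) (*CacheChains, error) {
	cc := NewCacheChains() // implements solver.CacheExporterTarget
	if err := Parse(configJSON, provider, cc); err != nil {
		return nil, err
	}
	return cc, nil
}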

@@ -0,0 +1,35 @@
package cacheimport
import (
"time"
digest "github.com/opencontainers/go-digest"
)
const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0"
type CacheConfig struct {
Layers []CacheLayer `json:"layers,omitempty"`
Records []CacheRecord `json:"records,omitempty"`
}
type CacheLayer struct {
Blob digest.Digest `json:"blob,omitempty"`
ParentIndex int `json:"parent,omitempty"`
}
type CacheRecord struct {
Results []CacheResult `json:"layers,omitempty"`
Digest digest.Digest `json:"digest,omitempty"`
Inputs [][]CacheInput `json:"inputs,omitempty"`
}
type CacheResult struct {
LayerIndex int `json:"layer"`
CreatedAt time.Time `json:"createdAt,omitempty"`
}
type CacheInput struct {
Selector string `json:"selector,omitempty"`
LinkIndex int `json:"link"`
}

@@ -0,0 +1,306 @@
package cacheimport
import (
"fmt"
"sort"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// sortConfig sorts the config structure to make sure it is deterministic
func sortConfig(cc *CacheConfig) {
type indexedLayer struct {
oldIndex int
newIndex int
l CacheLayer
}
unsortedLayers := make([]*indexedLayer, len(cc.Layers))
sortedLayers := make([]*indexedLayer, len(cc.Layers))
for i, l := range cc.Layers {
il := &indexedLayer{oldIndex: i, l: l}
unsortedLayers[i] = il
sortedLayers[i] = il
}
sort.Slice(sortedLayers, func(i, j int) bool {
li := sortedLayers[i].l
lj := sortedLayers[j].l
if li.Blob == lj.Blob {
return li.ParentIndex < lj.ParentIndex
}
return li.Blob < lj.Blob
})
for i, l := range sortedLayers {
l.newIndex = i
}
layers := make([]CacheLayer, len(sortedLayers))
for i, l := range sortedLayers {
if pID := l.l.ParentIndex; pID != -1 {
l.l.ParentIndex = unsortedLayers[pID].newIndex
}
layers[i] = l.l
}
type indexedRecord struct {
oldIndex int
newIndex int
r CacheRecord
}
unsortedRecords := make([]*indexedRecord, len(cc.Records))
sortedRecords := make([]*indexedRecord, len(cc.Records))
for i, r := range cc.Records {
ir := &indexedRecord{oldIndex: i, r: r}
unsortedRecords[i] = ir
sortedRecords[i] = ir
}
sort.Slice(sortedRecords, func(i, j int) bool {
ri := sortedRecords[i].r
rj := sortedRecords[j].r
if ri.Digest != rj.Digest {
return ri.Digest < rj.Digest
}
if len(ri.Inputs) != len(rj.Inputs) {
return len(ri.Inputs) < len(rj.Inputs)
}
for i, inputs := range ri.Inputs {
if len(ri.Inputs[i]) != len(rj.Inputs[i]) {
return len(ri.Inputs[i]) < len(rj.Inputs[i])
}
for j := range inputs {
if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
}
return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest
}
}
return ri.Digest < rj.Digest
})
for i, l := range sortedRecords {
l.newIndex = i
}
records := make([]CacheRecord, len(sortedRecords))
for i, r := range sortedRecords {
for j := range r.r.Results {
r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex
}
for j, inputs := range r.r.Inputs {
for k := range inputs {
r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex
}
sort.Slice(inputs, func(i, j int) bool {
return inputs[i].LinkIndex < inputs[j].LinkIndex
})
}
records[i] = r.r
}
cc.Layers = layers
cc.Records = records
}
func outputKey(dgst digest.Digest, idx int) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
}
type nlink struct {
dgst digest.Digest
input int
selector string
}
type normalizeState struct {
added map[*item]*item
links map[*item]map[nlink]map[digest.Digest]struct{}
byKey map[digest.Digest]*item
next int
}
func normalizeItem(it *item, state *normalizeState) (*item, error) {
if it2, ok := state.added[it]; ok {
return it2, nil
}
if len(it.links) == 0 {
id := it.dgst
if it2, ok := state.byKey[id]; ok {
state.added[it] = it2
return it2, nil
}
state.byKey[id] = it
state.added[it] = it
return it, nil
}
matches := map[digest.Digest]struct{}{}
// check if there is already a matching record
for i, m := range it.links {
if len(m) == 0 {
return nil, errors.Errorf("invalid incomplete links")
}
for l := range m {
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
it2, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
links := state.links[it2][nl]
if i == 0 {
for id := range links {
matches[id] = struct{}{}
}
} else {
for id := range matches {
if _, ok := links[id]; !ok {
delete(matches, id)
}
}
}
}
}
var id digest.Digest
links := it.links
if len(matches) > 0 {
for m := range matches {
if id == "" || id > m {
id = m
}
}
} else {
// keep tmp IDs deterministic
state.next++
id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next)))
state.byKey[id] = it
it.links = make([]map[link]struct{}, len(it.links))
for i := range it.links {
it.links[i] = map[link]struct{}{}
}
}
it2 := state.byKey[id]
state.added[it] = it2
for i, m := range links {
for l := range m {
subIt, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{}
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
if _, ok := state.links[subIt]; !ok {
state.links[subIt] = map[nlink]map[digest.Digest]struct{}{}
}
if _, ok := state.links[subIt][nl]; !ok {
state.links[subIt][nl] = map[digest.Digest]struct{}{}
}
state.links[subIt][nl][id] = struct{}{}
}
}
return it2, nil
}
type marshalState struct {
layers []CacheLayer
chainsByID map[string]int
descriptors DescriptorProvider
records []CacheRecord
recordsByItem map[*item]int
}
func marshalRemote(r *solver.Remote, state *marshalState) string {
if len(r.Descriptors) == 0 {
return ""
}
type Remote struct {
Descriptors []ocispec.Descriptor
Provider content.Provider
}
var parentID string
if len(r.Descriptors) > 1 {
r2 := &solver.Remote{
Descriptors: r.Descriptors[:len(r.Descriptors)-1],
Provider: r.Provider,
}
parentID = marshalRemote(r2, state)
}
desc := r.Descriptors[len(r.Descriptors)-1]
state.descriptors[desc.Digest] = DescriptorProviderPair{
Descriptor: desc,
Provider: r.Provider,
}
id := desc.Digest.String() + parentID
if _, ok := state.chainsByID[id]; ok {
return id
}
state.chainsByID[id] = len(state.layers)
l := CacheLayer{
Blob: desc.Digest,
ParentIndex: -1,
}
if parentID != "" {
l.ParentIndex = state.chainsByID[parentID]
}
state.layers = append(state.layers, l)
return id
}
func marshalItem(it *item, state *marshalState) error {
if _, ok := state.recordsByItem[it]; ok {
return nil
}
rec := CacheRecord{
Digest: it.dgst,
Inputs: make([][]CacheInput, len(it.links)),
}
for i, m := range it.links {
for l := range m {
if err := marshalItem(l.src, state); err != nil {
return err
}
idx, ok := state.recordsByItem[l.src]
if !ok {
return errors.Errorf("invalid source record: %v", l.src)
}
rec.Inputs[i] = append(rec.Inputs[i], CacheInput{
Selector: l.selector,
LinkIndex: idx,
})
}
}
if it.result != nil {
id := marshalRemote(it.result, state)
if id != "" {
idx, ok := state.chainsByID[id]
if !ok {
return errors.Errorf("parent chainid not found")
}
rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime})
}
}
state.recordsByItem[it] = len(state.records)
state.records = append(state.records, rec)
return nil
}

vendor/github.com/moby/buildkit/client/client.go generated vendored Normal file

@@ -0,0 +1,136 @@
package client
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/util/appdefaults"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type Client struct {
conn *grpc.ClientConn
}
type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithDialer(dialer),
grpc.FailOnNonTempDialError(true),
}
needWithInsecure := true
for _, o := range opts {
if _, ok := o.(*withBlockOpt); ok {
gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
opt, err := loadCredentials(credInfo)
if err != nil {
return nil, err
}
gopts = append(gopts, opt)
needWithInsecure = false
}
if wt, ok := o.(*withTracer); ok {
gopts = append(gopts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
}
}
if needWithInsecure {
gopts = append(gopts, grpc.WithInsecure())
}
if address == "" {
address = appdefaults.Address
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
}
c := &Client{
conn: conn,
}
return c, nil
}
func (c *Client) controlClient() controlapi.ControlClient {
return controlapi.NewControlClient(c.conn)
}
func (c *Client) Close() error {
return c.conn.Close()
}
type withBlockOpt struct{}
func WithBlock() ClientOpt {
return &withBlockOpt{}
}
type withCredentials struct {
ServerName string
CACert string
Cert string
Key string
}
// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
return &withCredentials{serverName, ca, cert, key}
}
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
ca, err := ioutil.ReadFile(opts.CACert)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(ca); !ok {
return nil, errors.New("failed to append ca certs")
}
cfg := &tls.Config{
ServerName: opts.ServerName,
RootCAs: certPool,
}
// produce an error if the user specified a cert or a key but forgot the other one
if opts.Cert != "" || opts.Key != "" {
cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
if err != nil {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
func WithTracer(t opentracing.Tracer) ClientOpt {
return &withTracer{t}
}
type withTracer struct {
tracer opentracing.Tracer
}
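
A usage sketch for the client options above; the address and certificate paths are hypothetical placeholders:

func newSecureClient() (*Client, error) {
	return New("tcp://127.0.0.1:1234",
		WithBlock(), // block until the connection is established
		WithCredentials("buildkitd", "/certs/ca.pem", "/certs/cert.pem", "/certs/key.pem"),
	)
}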

vendor/github.com/moby/buildkit/client/client_unix.go generated vendored Normal file

@@ -0,0 +1,19 @@
// +build !windows
package client
import (
"net"
"strings"
"time"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}

@@ -0,0 +1,24 @@
package client
import (
"net"
"strings"
"time"
"github.com/Microsoft/go-winio"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
switch addrParts[0] {
case "npipe":
address = strings.Replace(addrParts[1], "/", "\\", -1)
return winio.DialPipe(address, &timeout)
default:
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}
}

vendor/github.com/moby/buildkit/client/diskusage.go generated vendored Normal file

@@ -0,0 +1,73 @@
package client
import (
"context"
"sort"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type UsageInfo struct {
ID string
Mutable bool
InUse bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parent string
Description string
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
info := &DiskUsageInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.DiskUsageRequest{Filter: info.Filter}
resp, err := c.controlClient().DiskUsage(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to call diskusage")
}
var du []*UsageInfo
for _, d := range resp.Record {
du = append(du, &UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
})
}
sort.Slice(du, func(i, j int) bool {
if du[i].Size == du[j].Size {
return du[i].ID > du[j].ID
}
return du[i].Size > du[j].Size
})
return du, nil
}
type DiskUsageOption func(*DiskUsageInfo)
type DiskUsageInfo struct {
Filter string
}
func WithFilter(f string) DiskUsageOption {
return func(di *DiskUsageInfo) {
di.Filter = f
}
}
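
A sketch of querying usage through a connected client (c is assumed to come from New above, fmt to be imported, and the filter prefix is a placeholder):

func printUsage(ctx context.Context, c *Client) error {
	du, err := c.DiskUsage(ctx, WithFilter("sha256:"))
	if err != nil {
		return err
	}
	for _, d := range du { // already sorted by size, largest first
		fmt.Printf("%-20s %10d bytes in-use=%v\n", d.ID, d.Size, d.InUse)
	}
	return nil
}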

vendor/github.com/moby/buildkit/client/exporters.go generated vendored Normal file

@@ -0,0 +1,8 @@
package client
const (
ExporterImage = "image"
ExporterLocal = "local"
ExporterOCI = "oci"
ExporterDocker = "docker"
)

45
vendor/github.com/moby/buildkit/client/graph.go generated vendored Normal file
View file

@ -0,0 +1,45 @@
package client
import (
"time"
digest "github.com/opencontainers/go-digest"
)
type Vertex struct {
Digest digest.Digest
Inputs []digest.Digest
Name string
Started *time.Time
Completed *time.Time
Cached bool
Error string
}
type VertexStatus struct {
ID string
Vertex digest.Digest
Name string
Total int64
Current int64
Timestamp time.Time
Started *time.Time
Completed *time.Time
}
type VertexLog struct {
Vertex digest.Digest
Stream int
Data []byte
Timestamp time.Time
}
type SolveStatus struct {
Vertexes []*Vertex
Statuses []*VertexStatus
Logs []*VertexLog
}
type SolveResponse struct {
ExporterResponse map[string]string
}

387
vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored Normal file
View file

@ -0,0 +1,387 @@
package llb
import (
_ "crypto/sha256"
"sort"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type Meta struct {
Args []string
Env EnvList
Cwd string
User string
ProxyEnv *ProxyEnv
}
func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
e := &ExecOp{meta: meta, cachedOpMetadata: md}
rootMount := &mount{
target: pb.RootMount,
source: root,
readonly: readOnly,
}
e.mounts = append(e.mounts, rootMount)
if readOnly {
e.root = root
} else {
e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
}
rootMount.output = e.root
return e
}
type mount struct {
target string
readonly bool
source Output
output Output
selector string
cacheID string
tmpfs bool
// hasOutput bool
}
type ExecOp struct {
root Output
mounts []*mount
meta Meta
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
isValidated bool
}
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
m := &mount{
target: target,
source: source,
}
for _, o := range opt {
o(m)
}
e.mounts = append(e.mounts, m)
if m.readonly {
m.output = source
} else if m.tmpfs {
m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
} else {
m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)}
}
e.cachedPB = nil
e.isValidated = false
return m.output
}
func (e *ExecOp) GetMount(target string) Output {
for _, m := range e.mounts {
if m.target == target {
return m.output
}
}
return nil
}
func (e *ExecOp) Validate() error {
if e.isValidated {
return nil
}
if len(e.meta.Args) == 0 {
return errors.Errorf("arguments are required")
}
if e.meta.Cwd == "" {
return errors.Errorf("working directory is required")
}
for _, m := range e.mounts {
if m.source != nil {
if err := m.source.Vertex().Validate(); err != nil {
return err
}
}
}
e.isValidated = true
return nil
}
func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if e.cachedPB != nil {
return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil
}
if err := e.Validate(); err != nil {
return "", nil, nil, err
}
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
peo := &pb.ExecOp{
Meta: &pb.Meta{
Args: e.meta.Args,
Env: e.meta.Env.ToArray(),
Cwd: e.meta.Cwd,
User: e.meta.User,
},
}
if p := e.meta.ProxyEnv; p != nil {
peo.Meta.ProxyEnv = &pb.ProxyEnv{
HttpProxy: p.HttpProxy,
HttpsProxy: p.HttpsProxy,
FtpProxy: p.FtpProxy,
NoProxy: p.NoProxy,
}
}
pop := &pb.Op{
Op: &pb.Op_Exec{
Exec: peo,
},
}
outIndex := 0
for _, m := range e.mounts {
inputIndex := pb.InputIndex(len(pop.Inputs))
if m.source != nil {
if m.tmpfs {
return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
}
inp, err := m.source.ToInput()
if err != nil {
return "", nil, nil, err
}
newInput := true
for i, inp2 := range pop.Inputs {
if *inp == *inp2 {
inputIndex = pb.InputIndex(i)
newInput = false
break
}
}
if newInput {
pop.Inputs = append(pop.Inputs, inp)
}
} else {
inputIndex = pb.Empty
}
outputIndex := pb.OutputIndex(-1)
if !m.readonly && m.cacheID == "" && !m.tmpfs {
outputIndex = pb.OutputIndex(outIndex)
outIndex++
}
pm := &pb.Mount{
Input: inputIndex,
Dest: m.target,
Readonly: m.readonly,
Output: outputIndex,
Selector: m.selector,
}
if m.cacheID != "" {
pm.MountType = pb.MountType_CACHE
pm.CacheOpt = &pb.CacheOpt{
ID: m.cacheID,
}
}
if m.tmpfs {
pm.MountType = pb.MountType_TMPFS
}
peo.Mounts = append(peo.Mounts, pm)
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
}
e.cachedPBDigest = digest.FromBytes(dt)
e.cachedPB = dt
return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil
}
func (e *ExecOp) Output() Output {
return e.root
}
func (e *ExecOp) Inputs() (inputs []Output) {
mm := map[Output]struct{}{}
for _, m := range e.mounts {
if m.source != nil {
mm[m.source] = struct{}{}
}
}
for o := range mm {
inputs = append(inputs, o)
}
return
}
func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
return func() (pb.OutputIndex, error) {
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
i := 0
for _, m2 := range e.mounts {
if m2.readonly || m2.cacheID != "" {
continue
}
if m == m2 {
return pb.OutputIndex(i), nil
}
i++
}
return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target)
}
}
type ExecState struct {
State
exec *ExecOp
}
func (e ExecState) AddMount(target string, source State, opt ...MountOption) State {
return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...))
}
func (e ExecState) GetMount(target string) State {
return NewState(e.exec.GetMount(target))
}
func (e ExecState) Root() State {
return e.State
}
type MountOption func(*mount)
func Readonly(m *mount) {
m.readonly = true
}
func SourcePath(src string) MountOption {
return func(m *mount) {
m.selector = src
}
}
func AsPersistentCacheDir(id string) MountOption {
return func(m *mount) {
m.cacheID = id
}
}
func Tmpfs() MountOption {
return func(m *mount) {
m.tmpfs = true
}
}
type RunOption interface {
SetRunOption(es *ExecInfo)
}
type runOptionFunc func(*ExecInfo)
func (fn runOptionFunc) SetRunOption(ei *ExecInfo) {
fn(ei)
}
func Shlex(str string) RunOption {
return Shlexf(str)
}
func Shlexf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = shlexf(str, v...)(ei.State)
})
}
func Args(a []string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = args(a...)(ei.State)
})
}
func AddEnv(key, value string) RunOption {
return AddEnvf(key, value)
}
func AddEnvf(key, value string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.AddEnvf(key, value, v...)
})
}
func User(str string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.User(str)
})
}
func Dir(str string) RunOption {
return Dirf(str)
}
func Dirf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Dirf(str, v...)
})
}
func Reset(s State) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Reset(s)
})
}
func With(so ...StateOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.With(so...)
})
}
func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts})
})
}
func ReadonlyRootFS() RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ReadonlyRootFS = true
})
}
func WithProxy(ps ProxyEnv) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ProxyEnv = &ps
})
}
type ExecInfo struct {
opMetaWrapper
State State
Mounts []MountInfo
ReadonlyRootFS bool
ProxyEnv *ProxyEnv
}
type MountInfo struct {
Target string
Source Output
Opts []MountOption
}
type ProxyEnv struct {
HttpProxy string
HttpsProxy string
FtpProxy string
NoProxy string
}
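These options compose through State.Run (defined in state.go further below); a sketch with assumed image and command names:

// Run a command with an extra env var, a working directory, and a
// persistent cache mount backed by scratch.
run := llb.Image("docker.io/library/alpine:latest").Run(
    llb.Shlex("apk add --no-cache git"),
    llb.AddEnv("LANG", "C.UTF-8"),
    llb.Dir("/src"),
    llb.AddMount("/var/cache/apk", llb.Scratch(), llb.AsPersistentCacheDir("apk-cache")),
)
def, err := run.Root().Marshal() // the rootfs after the exec continues as a new state
if err != nil {
    return err
}
fmt.Println(len(def.Def), "ops marshaled")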

87
vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go generated vendored Normal file
View file

@ -0,0 +1,87 @@
package imagemetaresolver
import (
"context"
"net/http"
"sync"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/imageutil"
digest "github.com/opencontainers/go-digest"
)
var defaultImageMetaResolver llb.ImageMetaResolver
var defaultImageMetaResolverOnce sync.Once
var WithDefault = llb.ImageOptionFunc(func(ii *llb.ImageInfo) {
llb.WithMetaResolver(Default()).SetImageOption(ii)
})
type imageMetaResolverOpts struct {
platform string
}
type ImageMetaResolverOpt func(o *imageMetaResolverOpts)
func WithPlatform(p string) ImageMetaResolverOpt {
return func(o *imageMetaResolverOpts) {
o.platform = p
}
}
func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver {
var opts imageMetaResolverOpts
for _, f := range with {
f(&opts)
}
return &imageMetaResolver{
resolver: docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
}),
platform: opts.platform,
buffer: contentutil.NewBuffer(),
cache: map[string]resolveResult{},
locker: locker.New(),
}
}
func Default() llb.ImageMetaResolver {
defaultImageMetaResolverOnce.Do(func() {
defaultImageMetaResolver = New()
})
return defaultImageMetaResolver
}
type imageMetaResolver struct {
resolver remotes.Resolver
buffer contentutil.Buffer
platform string
locker *locker.Locker
cache map[string]resolveResult
}
type resolveResult struct {
config []byte
dgst digest.Digest
}
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
imr.locker.Lock(ref)
defer imr.locker.Unlock(ref)
if res, ok := imr.cache[ref]; ok {
return res.dgst, res.config, nil
}
dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, imr.platform)
if err != nil {
return "", nil, err
}
imr.cache[ref] = resolveResult{dgst: dgst, config: config}
return dgst, config, nil
}
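Wired into llb.Image via WithDefault, the resolver fetches each image config once per ref and applies its Env and WorkingDir to the returned state; a sketch (the image name and resulting directory are only examples):

st := llb.Image("docker.io/library/golang:1.10", imagemetaresolver.WithDefault)
// Env and WorkingDir come from the resolved image config; repeated uses
// of the same ref hit the in-memory cache.
fmt.Println(st.GetDir()) // e.g. "/go" for the golang image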

60
vendor/github.com/moby/buildkit/client/llb/marshal.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
package llb
import (
"io"
"io/ioutil"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
// Definition is the LLB definition structure with per-vertex metadata entries
// Corresponds to the Definition structure defined in solver/pb.Definition.
type Definition struct {
Def [][]byte
Metadata map[digest.Digest]OpMetadata
}
func (def *Definition) ToPB() *pb.Definition {
md := make(map[digest.Digest]OpMetadata)
for k, v := range def.Metadata {
md[k] = v
}
return &pb.Definition{
Def: def.Def,
Metadata: md,
}
}
func (def *Definition) FromPB(x *pb.Definition) {
def.Def = x.Def
def.Metadata = make(map[digest.Digest]OpMetadata)
for k, v := range x.Metadata {
def.Metadata[k] = v
}
}
type OpMetadata = pb.OpMetadata
func WriteTo(def *Definition, w io.Writer) error {
b, err := def.ToPB().Marshal()
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
func ReadFrom(r io.Reader) (*Definition, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var pbDef pb.Definition
if err := pbDef.Unmarshal(b); err != nil {
return nil, err
}
var def Definition
def.FromPB(&pbDef)
return &def, nil
}
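WriteTo and ReadFrom round-trip a Definition through its protobuf form; a sketch, assuming st is an llb.State and bytes is imported:

def, err := st.Marshal()
if err != nil {
    return err
}
var buf bytes.Buffer
if err := llb.WriteTo(def, &buf); err != nil {
    return err
}
def2, err := llb.ReadFrom(&buf)
if err != nil {
    return err
}
fmt.Println(len(def2.Def) == len(def.Def)) // true: round-trip preserves the ops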

152
vendor/github.com/moby/buildkit/client/llb/meta.go generated vendored Normal file
View file

@ -0,0 +1,152 @@
package llb
import (
"fmt"
"path"
"github.com/google/shlex"
)
type contextKeyT string
var (
keyArgs = contextKeyT("llb.exec.args")
keyDir = contextKeyT("llb.exec.dir")
keyEnv = contextKeyT("llb.exec.env")
keyUser = contextKeyT("llb.exec.user")
)
func addEnv(key, value string) StateOption {
return addEnvf(key, value)
}
func addEnvf(key, value string, v ...interface{}) StateOption {
return func(s State) State {
return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
}
}
func dir(str string) StateOption {
return dirf(str)
}
func dirf(str string, v ...interface{}) StateOption {
return func(s State) State {
value := fmt.Sprintf(str, v...)
if !path.IsAbs(value) {
prev := getDir(s)
if prev == "" {
prev = "/"
}
value = path.Join(prev, value)
}
return s.WithValue(keyDir, value)
}
}
func user(str string) StateOption {
return func(s State) State {
return s.WithValue(keyUser, str)
}
}
func reset(s2 State) StateOption {
return func(s State) State {
s = NewState(s.Output())
s.ctx = s2.ctx
return s
}
}
func getEnv(s State) EnvList {
v := s.Value(keyEnv)
if v != nil {
return v.(EnvList)
}
return EnvList{}
}
func getDir(s State) string {
v := s.Value(keyDir)
if v != nil {
return v.(string)
}
return ""
}
func getArgs(s State) []string {
v := s.Value(keyArgs)
if v != nil {
return v.([]string)
}
return nil
}
func getUser(s State) string {
v := s.Value(keyUser)
if v != nil {
return v.(string)
}
return ""
}
func args(args ...string) StateOption {
return func(s State) State {
return s.WithValue(keyArgs, args)
}
}
func shlexf(str string, v ...interface{}) StateOption {
return func(s State) State {
arg, err := shlex.Split(fmt.Sprintf(str, v...))
if err != nil {
// TODO: handle error
}
return args(arg...)(s)
}
}
type EnvList []KeyValue
type KeyValue struct {
key string
value string
}
func (e EnvList) AddOrReplace(k, v string) EnvList {
e = e.Delete(k)
e = append(e, KeyValue{key: k, value: v})
return e
}
func (e EnvList) Delete(k string) EnvList {
e = append([]KeyValue(nil), e...)
if i, ok := e.Index(k); ok {
return append(e[:i], e[i+1:]...)
}
return e
}
func (e EnvList) Get(k string) (string, bool) {
if index, ok := e.Index(k); ok {
return e[index].value, true
}
return "", false
}
func (e EnvList) Index(k string) (int, bool) {
for i, kv := range e {
if kv.key == k {
return i, true
}
}
return -1, false
}
func (e EnvList) ToArray() []string {
out := make([]string, 0, len(e))
for _, kv := range e {
out = append(out, kv.key+"="+kv.value)
}
return out
}
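EnvList is copy-on-write: AddOrReplace and Delete return fresh slices rather than mutating the receiver, so states sharing an older list are unaffected. For example:

var env llb.EnvList
env = env.AddOrReplace("FOO", "1")
env = env.AddOrReplace("FOO", "2") // Delete("FOO") runs first, so no duplicates
v, _ := env.Get("FOO")
fmt.Println(v, env.ToArray()) // 2 [FOO=2]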

17
vendor/github.com/moby/buildkit/client/llb/resolver.go generated vendored Normal file
View file

@ -0,0 +1,17 @@
package llb
import (
"context"
digest "github.com/opencontainers/go-digest"
)
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
return ImageOptionFunc(func(ii *ImageInfo) {
ii.metaResolver = mr
})
}
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
}

359
vendor/github.com/moby/buildkit/client/llb/source.go generated vendored Normal file
View file

@ -0,0 +1,359 @@
package llb
import (
"context"
_ "crypto/sha256"
"encoding/json"
"os"
"strconv"
"strings"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type SourceOp struct {
id string
attrs map[string]string
output Output
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
err error
}
func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp {
s := &SourceOp{
id: id,
attrs: attrs,
cachedOpMetadata: md,
}
s.output = &output{vertex: s}
return s
}
func (s *SourceOp) Validate() error {
if s.err != nil {
return s.err
}
if s.id == "" {
return errors.Errorf("source identifier can't be empty")
}
return nil
}
func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if s.cachedPB != nil {
return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil
}
if err := s.Validate(); err != nil {
return "", nil, nil, err
}
proto := &pb.Op{
Op: &pb.Op_Source{
Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
},
}
dt, err := proto.Marshal()
if err != nil {
return "", nil, nil, err
}
s.cachedPB = dt
s.cachedPBDigest = digest.FromBytes(dt)
return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil
}
func (s *SourceOp) Output() Output {
return s.output
}
func (s *SourceOp) Inputs() []Output {
return nil
}
func Source(id string) State {
return NewState(NewSource(id, nil, OpMetadata{}).Output())
}
func Image(ref string, opts ...ImageOption) State {
r, err := reference.ParseNormalizedNamed(ref)
if err == nil {
ref = reference.TagNameOnly(r).String()
}
var info ImageInfo
for _, opt := range opts {
opt.SetImageOption(&info)
}
src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial
if err != nil {
src.err = err
}
if info.metaResolver != nil {
_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref)
if err != nil {
src.err = err
} else {
var img struct {
Config struct {
Env []string `json:"Env,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
User string `json:"User,omitempty"`
} `json:"config,omitempty"`
}
if err := json.Unmarshal(dt, &img); err != nil {
src.err = err
} else {
st := NewState(src.Output())
for _, env := range img.Config.Env {
parts := strings.SplitN(env, "=", 2)
if len(parts[0]) > 0 {
var v string
if len(parts) > 1 {
v = parts[1]
}
st = st.AddEnv(parts[0], v)
}
}
st = st.Dir(img.Config.WorkingDir)
return st
}
}
}
return NewState(src.Output())
}
type ImageOption interface {
SetImageOption(*ImageInfo)
}
type ImageOptionFunc func(*ImageInfo)
func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
fn(ii)
}
type ImageInfo struct {
opMetaWrapper
metaResolver ImageMetaResolver
}
func Git(remote, ref string, opts ...GitOption) State {
url := ""
for _, prefix := range []string{
"http://", "https://", "git://", "git@",
} {
if strings.HasPrefix(remote, prefix) {
url = strings.Split(remote, "#")[0]
remote = strings.TrimPrefix(remote, prefix)
}
}
id := remote
if ref != "" {
id += "#" + ref
}
gi := &GitInfo{}
for _, o := range opts {
o.SetGitOption(gi)
}
attrs := map[string]string{}
if gi.KeepGitDir {
attrs[pb.AttrKeepGitDir] = "true"
}
if url != "" {
attrs[pb.AttrFullRemoteURL] = url
}
source := NewSource("git://"+id, attrs, gi.Metadata())
return NewState(source.Output())
}
type GitOption interface {
SetGitOption(*GitInfo)
}
type gitOptionFunc func(*GitInfo)
func (fn gitOptionFunc) SetGitOption(gi *GitInfo) {
fn(gi)
}
type GitInfo struct {
opMetaWrapper
KeepGitDir bool
}
func KeepGitDir() GitOption {
return gitOptionFunc(func(gi *GitInfo) {
gi.KeepGitDir = true
})
}
func Scratch() State {
return NewState(nil)
}
func Local(name string, opts ...LocalOption) State {
gi := &LocalInfo{}
for _, o := range opts {
o.SetLocalOption(gi)
}
attrs := map[string]string{}
if gi.SessionID != "" {
attrs[pb.AttrLocalSessionID] = gi.SessionID
}
if gi.IncludePatterns != "" {
attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
}
if gi.FollowPaths != "" {
attrs[pb.AttrFollowPaths] = gi.FollowPaths
}
if gi.ExcludePatterns != "" {
attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
}
if gi.SharedKeyHint != "" {
attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
}
source := NewSource("local://"+name, attrs, gi.Metadata())
return NewState(source.Output())
}
type LocalOption interface {
SetLocalOption(*LocalInfo)
}
type localOptionFunc func(*LocalInfo)
func (fn localOptionFunc) SetLocalOption(li *LocalInfo) {
fn(li)
}
func SessionID(id string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SessionID = id
})
}
func IncludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.IncludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.IncludePatterns = string(dt)
})
}
func FollowPaths(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.FollowPaths = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.FollowPaths = string(dt)
})
}
func ExcludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.ExcludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.ExcludePatterns = string(dt)
})
}
func SharedKeyHint(h string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SharedKeyHint = h
})
}
type LocalInfo struct {
opMetaWrapper
SessionID string
IncludePatterns string
ExcludePatterns string
FollowPaths string
SharedKeyHint string
}
func HTTP(url string, opts ...HTTPOption) State {
hi := &HTTPInfo{}
for _, o := range opts {
o.SetHTTPOption(hi)
}
attrs := map[string]string{}
if hi.Checksum != "" {
attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
}
if hi.Filename != "" {
attrs[pb.AttrHTTPFilename] = hi.Filename
}
if hi.Perm != 0 {
attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
}
if hi.UID != 0 {
attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
}
if hi.GID != 0 {
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
}
source := NewSource(url, attrs, hi.Metadata())
return NewState(source.Output())
}
type HTTPInfo struct {
opMetaWrapper
Checksum digest.Digest
Filename string
Perm int
UID int
GID int
}
type HTTPOption interface {
SetHTTPOption(*HTTPInfo)
}
type httpOptionFunc func(*HTTPInfo)
func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) {
fn(hi)
}
func Checksum(dgst digest.Digest) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Checksum = dgst
})
}
func Chmod(perm os.FileMode) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Perm = int(perm) & 0777
})
}
func Filename(name string) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Filename = name
})
}
func Chown(uid, gid int) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.UID = uid
hi.GID = gid
})
}
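Each constructor maps to a source identifier ("docker-image://", "git://", "local://", or the raw URL for HTTP); a sketch with example refs and paths, wrapped in a hypothetical helper:

func sources(sid string) []llb.State {
    return []llb.State{
        llb.Image("docker.io/library/alpine:latest"),
        llb.Git("github.com/moby/buildkit", "master", llb.KeepGitDir()),
        llb.Local("context", llb.SessionID(sid), llb.ExcludePatterns([]string{"node_modules/"})),
        llb.HTTP("https://example.com/src.tar.gz", llb.Filename("src.tar.gz"), llb.Chmod(0644)),
    }
}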

316
vendor/github.com/moby/buildkit/client/llb/state.go generated vendored Normal file
View file

@ -0,0 +1,316 @@
package llb
import (
"context"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
)
type StateOption func(State) State
type Output interface {
ToInput() (*pb.Input, error)
Vertex() Vertex
}
type Vertex interface {
Validate() error
Marshal() (digest.Digest, []byte, *OpMetadata, error)
Output() Output
Inputs() []Output
}
func NewState(o Output) State {
s := State{
out: o,
ctx: context.Background(),
}
s = dir("/")(s)
s = addEnv("PATH", system.DefaultPathEnv)(s)
return s
}
type State struct {
out Output
ctx context.Context
}
func (s State) WithValue(k, v interface{}) State {
return State{
out: s.out,
ctx: context.WithValue(s.ctx, k, v),
}
}
func (s State) Value(k interface{}) interface{} {
return s.ctx.Value(k)
}
func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
def := &Definition{
Metadata: make(map[digest.Digest]OpMetadata, 0),
}
if s.Output() == nil {
return def, nil
}
def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md)
if err != nil {
return def, err
}
inp, err := s.Output().ToInput()
if err != nil {
return def, err
}
proto := &pb.Op{Inputs: []*pb.Input{inp}}
dt, err := proto.Marshal()
if err != nil {
return def, err
}
def.Def = append(def.Def, dt)
return def, nil
}
func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) {
if _, ok := vertexCache[v]; ok {
return def, nil
}
for _, inp := range v.Inputs() {
var err error
def, err = marshal(inp.Vertex(), def, cache, vertexCache, md)
if err != nil {
return def, err
}
}
dgst, dt, opMeta, err := v.Marshal()
if err != nil {
return def, err
}
vertexCache[v] = struct{}{}
if opMeta != nil {
m := mergeMetadata(def.Metadata[dgst], *opMeta)
for _, f := range md {
f.SetMetadataOption(&m)
}
def.Metadata[dgst] = m
}
if _, ok := cache[dgst]; ok {
return def, nil
}
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil
}
func (s State) Validate() error {
return s.Output().Vertex().Validate()
}
func (s State) Output() Output {
return s.out
}
func (s State) WithOutput(o Output) State {
return State{
out: o,
ctx: s.ctx,
}
}
func (s State) Run(ro ...RunOption) ExecState {
ei := &ExecInfo{State: s}
for _, o := range ro {
o.SetRunOption(ei)
}
meta := Meta{
Args: getArgs(ei.State),
Cwd: getDir(ei.State),
Env: getEnv(ei.State),
User: getUser(ei.State),
ProxyEnv: ei.ProxyEnv,
}
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata())
for _, m := range ei.Mounts {
exec.AddMount(m.Target, m.Source, m.Opts...)
}
return ExecState{
State: s.WithOutput(exec.Output()),
exec: exec,
}
}
func (s State) AddEnv(key, value string) State {
return s.AddEnvf(key, value)
}
func (s State) AddEnvf(key, value string, v ...interface{}) State {
return addEnvf(key, value, v...)(s)
}
func (s State) Dir(str string) State {
return s.Dirf(str)
}
func (s State) Dirf(str string, v ...interface{}) State {
return dirf(str, v...)(s)
}
func (s State) GetEnv(key string) (string, bool) {
return getEnv(s).Get(key)
}
func (s State) GetDir() string {
return getDir(s)
}
func (s State) GetArgs() []string {
return getArgs(s)
}
func (s State) Reset(s2 State) State {
return reset(s2)(s)
}
func (s State) User(v string) State {
return user(v)(s)
}
func (s State) With(so ...StateOption) State {
for _, o := range so {
s = o(s)
}
return s
}
type output struct {
vertex Vertex
getIndex func() (pb.OutputIndex, error)
err error
}
func (o *output) ToInput() (*pb.Input, error) {
if o.err != nil {
return nil, o.err
}
var index pb.OutputIndex
if o.getIndex != nil {
var err error
index, err = o.getIndex()
if err != nil {
return nil, err
}
}
dgst, _, _, err := o.vertex.Marshal()
if err != nil {
return nil, err
}
return &pb.Input{Digest: dgst, Index: index}, nil
}
func (o *output) Vertex() Vertex {
return o.vertex
}
type MetadataOpt interface {
SetMetadataOption(*OpMetadata)
RunOption
LocalOption
HTTPOption
ImageOption
GitOption
}
type metadataOptFunc func(m *OpMetadata)
func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) {
fn(m)
}
func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) {
ei.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) {
li.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) {
hi.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) {
ii.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetGitOption(gi *GitInfo) {
gi.ApplyMetadata(fn)
}
func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
if m2.IgnoreCache {
m1.IgnoreCache = true
}
if len(m2.Description) > 0 {
if m1.Description == nil {
m1.Description = make(map[string]string)
}
for k, v := range m2.Description {
m1.Description[k] = v
}
}
if m2.ExportCache != nil {
m1.ExportCache = m2.ExportCache
}
return m1
}
var IgnoreCache = metadataOptFunc(func(md *OpMetadata) {
md.IgnoreCache = true
})
func WithDescription(m map[string]string) MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.Description = m
})
}
// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.ExportCache = &pb.ExportCache{Value: true}
})
}
// WithoutExportCache sets results for this vertex to be not exported with
// the cache
func WithoutExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// ExportCache with value false means to disable exporting
md.ExportCache = &pb.ExportCache{Value: false}
})
}
// WithoutDefaultExportCache resets the cache export for the vertex to use
// the default defined by the build configuration.
func WithoutDefaultExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// nil means no vertex based config has been set
md.ExportCache = nil
})
}
type opMetaWrapper struct {
OpMetadata
}
func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) {
f(&mw.OpMetadata)
}
func (mw *opMetaWrapper) Metadata() OpMetadata {
return mw.OpMetadata
}
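Metadata options attach per-vertex settings at Marshal time; a sketch combining a vertex-level option with a definition-wide one (the image and description map are examples):

st := llb.Image("docker.io/library/alpine:latest").
    Run(llb.Shlex("echo hello"), llb.IgnoreCache)
def, err := st.Marshal(llb.WithDescription(map[string]string{"purpose": "demo"}))
if err != nil {
    return err
}
fmt.Println(len(def.Metadata), "vertices carry metadata")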

50
vendor/github.com/moby/buildkit/client/prune.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
package client
import (
"context"
"io"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error {
info := &PruneInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.PruneRequest{}
cl, err := c.controlClient().Prune(ctx, req)
if err != nil {
return errors.Wrap(err, "failed to call prune")
}
for {
d, err := cl.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if ch != nil {
ch <- UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
}
}
}
}
type PruneOption func(*PruneInfo)
type PruneInfo struct {
}
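Prune streams each deleted record to ch and returns when the server's stream ends; it does not close ch itself, so callers drain it from a goroutine. A sketch, assuming c is a *Client:

ch := make(chan client.UsageInfo)
done := make(chan struct{})
go func() {
    defer close(done)
    for r := range ch {
        fmt.Println("pruned:", r.ID, r.Size)
    }
}()
err := c.Prune(ctx, ch)
close(ch) // safe: Prune has stopped sending once it returns
<-done
if err != nil {
    return err
}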

251
vendor/github.com/moby/buildkit/client/solve.go generated vendored Normal file
View file

@ -0,0 +1,251 @@
package client
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver/pb"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type SolveOpt struct {
Exporter string
ExporterAttrs map[string]string
ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker
ExporterOutputDir string // for ExporterLocal
LocalDirs map[string]string
SharedKey string
Frontend string
FrontendAttrs map[string]string
ExportCache string
ExportCacheAttrs map[string]string
ImportCache []string
Session []session.Attachable
}
// Solve calls Solve on the controller.
// def must be nil if (and only if) opt.Frontend is set.
func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
defer func() {
if statusChan != nil {
close(statusChan)
}
}()
if opt.Frontend == "" && def == nil {
return nil, errors.New("invalid empty definition")
}
if opt.Frontend != "" && def != nil {
return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend)
}
syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs)
if err != nil {
return nil, err
}
ref := identity.NewID()
eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background())
defer cancelStatus()
if span := opentracing.SpanFromContext(ctx); span != nil {
statusContext = opentracing.ContextWithSpan(statusContext, span)
}
s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
}
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
}
for _, a := range opt.Session {
s.Allow(a)
}
switch opt.Exporter {
case ExporterLocal:
if opt.ExporterOutput != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if opt.ExporterOutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir))
case ExporterOCI, ExporterDocker:
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
if opt.ExporterOutput == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter)
}
s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput))
default:
if opt.ExporterOutput != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter)
}
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
}
eg.Go(func() error {
return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
})
var res *SolveResponse
eg.Go(func() error {
defer func() { // make sure the Status ends cleanly on build errors
go func() {
<-time.After(3 * time.Second)
cancelStatus()
}()
logrus.Debugf("stopping session")
s.Close()
}()
var pbd *pb.Definition
if def != nil {
pbd = def.ToPB()
}
resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: opt.Exporter,
ExporterAttrs: opt.ExporterAttrs,
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Cache: controlapi.CacheOptions{
ExportRef: opt.ExportCache,
ImportRefs: opt.ImportCache,
ExportAttrs: opt.ExportCacheAttrs,
},
})
if err != nil {
return errors.Wrap(err, "failed to solve")
}
res = &SolveResponse{
ExporterResponse: resp.ExporterResponse,
}
return nil
})
eg.Go(func() error {
stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
Ref: ref,
})
if err != nil {
return errors.Wrap(err, "failed to get status")
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
s := SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
if statusChan != nil {
statusChan <- &s
}
}
})
if err := eg.Wait(); err != nil {
return nil, err
}
return res, nil
}
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
return nil, errors.Wrapf(err, "could not find %s", d)
}
if !fi.IsDir() {
return nil, errors.Errorf("%s not a directory", d)
}
}
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d})
}
} else {
for _, dt := range def.Def {
var op pb.Op
if err := (&op).Unmarshal(dt); err != nil {
return nil, errors.Wrap(err, "failed to parse llb proto op")
}
if src := op.GetSource(); src != nil {
if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property
name := strings.TrimPrefix(src.Identifier, "local://")
d, ok := localDirs[name]
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes
}
}
}
}
return dirs, nil
}
func defaultSessionName() string {
wd, err := os.Getwd()
if err != nil {
return "unknown"
}
return filepath.Base(wd)
}
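A minimal Solve invocation with the image exporter and a status drain; the client, definition, and target name are assumptions (def would come from llb's State.Marshal, and "name" is the attr the image exporter reads):

statusCh := make(chan *client.SolveStatus)
go func() {
    for s := range statusCh { // Solve closes statusCh when it returns
        for _, v := range s.Vertexes {
            fmt.Println(v.Name, "cached:", v.Cached)
        }
    }
}()
res, err := c.Solve(ctx, def, client.SolveOpt{
    Exporter:      client.ExporterImage,
    ExporterAttrs: map[string]string{"name": "docker.io/example/app:latest"},
}, statusCh)
if err != nil {
    return err
}
fmt.Println(res.ExporterResponse)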

49
vendor/github.com/moby/buildkit/client/workers.go generated vendored Normal file
View file

@ -0,0 +1,49 @@
package client
import (
"context"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type WorkerInfo struct {
ID string
Labels map[string]string
}
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
info := &ListWorkersInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
resp, err := c.controlClient().ListWorkers(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to list workers")
}
var wi []*WorkerInfo
for _, w := range resp.Record {
wi = append(wi, &WorkerInfo{
ID: w.ID,
Labels: w.Labels,
})
}
return wi, nil
}
type ListWorkersOption func(*ListWorkersInfo)
type ListWorkersInfo struct {
Filter []string
}
func WithWorkerFilter(f []string) ListWorkersOption {
return func(wi *ListWorkersInfo) {
wi.Filter = f
}
}
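A usage sketch, again assuming c is a *Client:

workers, err := c.ListWorkers(ctx)
if err != nil {
    return err
}
for _, w := range workers {
    fmt.Println(w.ID, w.Labels)
}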

292
vendor/github.com/moby/buildkit/control/control.go generated vendored Normal file
View file

@ -0,0 +1,292 @@
package control
import (
"context"
"github.com/docker/distribution/reference"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
type Opt struct {
SessionManager *session.Manager
WorkerController *worker.Controller
Frontends map[string]frontend.Frontend
CacheKeyStorage solver.CacheKeyStorage
CacheExporter *remotecache.CacheExporter
CacheImporter *remotecache.CacheImporter
}
type Controller struct { // TODO: ControlService
opt Opt
solver *llbsolver.Solver
}
func NewController(opt Opt) (*Controller, error) {
solver := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.CacheImporter)
c := &Controller{
opt: opt,
solver: solver,
}
return c, nil
}
func (c *Controller) Register(server *grpc.Server) error {
controlapi.RegisterControlServer(server, c)
return nil
}
func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) {
resp := &controlapi.DiskUsageResponse{}
workers, err := c.opt.WorkerController.List()
if err != nil {
return nil, err
}
for _, w := range workers {
du, err := w.DiskUsage(ctx, client.DiskUsageInfo{
Filter: r.Filter,
})
if err != nil {
return nil, err
}
for _, r := range du {
resp.Record = append(resp.Record, &controlapi.UsageRecord{
// TODO: add worker info
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size_: r.Size,
Parent: r.Parent,
UsageCount: int64(r.UsageCount),
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
})
}
}
return resp, nil
}
func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error {
ch := make(chan client.UsageInfo)
eg, ctx := errgroup.WithContext(stream.Context())
workers, err := c.opt.WorkerController.List()
if err != nil {
return errors.Wrap(err, "failed to list workers for prune")
}
for _, w := range workers {
func(w worker.Worker) {
eg.Go(func() error {
return w.Prune(ctx, ch)
})
}(w)
}
eg2, ctx := errgroup.WithContext(stream.Context())
eg2.Go(func() error {
defer close(ch)
return eg.Wait()
})
eg2.Go(func() error {
for r := range ch {
if err := stream.Send(&controlapi.UsageRecord{
// TODO: add worker info
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size_: r.Size,
Parent: r.Parent,
UsageCount: int64(r.UsageCount),
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
}); err != nil {
return err
}
}
return nil
})
return eg2.Wait()
}
func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
ctx = session.NewContext(ctx, req.Session)
var expi exporter.ExporterInstance
// TODO: multiworker
// This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this.
w, err := c.opt.WorkerController.GetDefault()
if err != nil {
return nil, err
}
if req.Exporter != "" {
exp, err := w.Exporter(req.Exporter)
if err != nil {
return nil, err
}
expi, err = exp.Resolve(ctx, req.ExporterAttrs)
if err != nil {
return nil, err
}
}
var cacheExporter *remotecache.RegistryCacheExporter
if ref := req.Cache.ExportRef; ref != "" {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
exportCacheRef := reference.TagNameOnly(parsed).String()
cacheExporter = c.opt.CacheExporter.ExporterForTarget(exportCacheRef)
}
var importCacheRefs []string
for _, ref := range req.Cache.ImportRefs {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
importCacheRefs = append(importCacheRefs, reference.TagNameOnly(parsed).String())
}
resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{
Frontend: req.Frontend,
Definition: req.Definition,
FrontendOpt: req.FrontendAttrs,
ImportCacheRefs: importCacheRefs,
}, llbsolver.ExporterRequest{
Exporter: expi,
CacheExporter: cacheExporter,
CacheExportMode: parseCacheExporterOpt(req.Cache.ExportAttrs),
})
if err != nil {
return nil, err
}
return &controlapi.SolveResponse{
ExporterResponse: resp.ExporterResponse,
}, nil
}
func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error {
ch := make(chan *client.SolveStatus, 8)
eg, ctx := errgroup.WithContext(stream.Context())
eg.Go(func() error {
return c.solver.Status(ctx, req.Ref, ch)
})
eg.Go(func() error {
for {
ss, ok := <-ch
if !ok {
return nil
}
sr := controlapi.StatusResponse{}
for _, v := range ss.Vertexes {
sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range ss.Statuses {
sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Current: v.Current,
Total: v.Total,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range ss.Logs {
sr.Logs = append(sr.Logs, &controlapi.VertexLog{
Vertex: v.Vertex,
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: v.Timestamp,
})
}
if err := stream.SendMsg(&sr); err != nil {
return err
}
}
})
return eg.Wait()
}
func (c *Controller) Session(stream controlapi.Control_SessionServer) error {
logrus.Debugf("session started")
conn, closeCh, opts := grpchijack.Hijack(stream)
defer conn.Close()
ctx, cancel := context.WithCancel(stream.Context())
go func() {
<-closeCh
cancel()
}()
err := c.opt.SessionManager.HandleConn(ctx, conn, opts)
logrus.Debugf("session finished: %v", err)
return err
}
func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) {
resp := &controlapi.ListWorkersResponse{}
workers, err := c.opt.WorkerController.List(r.Filter...)
if err != nil {
return nil, err
}
for _, w := range workers {
resp.Record = append(resp.Record, &controlapi.WorkerRecord{
ID: w.ID(),
Labels: w.Labels(),
})
}
return resp, nil
}
func parseCacheExporterOpt(opt map[string]string) solver.CacheExportMode {
for k, v := range opt {
switch k {
case "mode":
switch v {
case "min":
return solver.CacheExportModeMin
case "max":
return solver.CacheExportModeMax
default:
logrus.Debugf("skipping incalid cache export mode: %s", v)
}
default:
logrus.Warnf("skipping invalid cache export opt: %s", v)
}
}
return solver.CacheExportModeMin
}

30
vendor/github.com/moby/buildkit/executor/executor.go generated vendored Normal file
View file

@ -0,0 +1,30 @@
package executor
import (
"context"
"io"
"github.com/moby/buildkit/cache"
)
type Meta struct {
Args []string
Env []string
User string
Cwd string
Tty bool
ReadonlyRootFS bool
// DisableNetworking bool
}
type Mount struct {
Src cache.Mountable
Selector string
Dest string
Readonly bool
}
type Executor interface {
// TODO: add stdout/err
Exec(ctx context.Context, meta Meta, rootfs cache.Mountable, mounts []Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
}

38
vendor/github.com/moby/buildkit/executor/oci/hosts.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package oci
import (
"context"
"io/ioutil"
"os"
"path/filepath"
)
const hostsContent = `
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
`
func GetHostsFile(ctx context.Context, stateDir string) (string, error) {
p := filepath.Join(stateDir, "hosts")
_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
_, err := os.Stat(p)
if err == nil {
return "", nil
}
if !os.IsNotExist(err) {
return "", err
}
if err := ioutil.WriteFile(p+".tmp", []byte(hostsContent), 0644); err != nil {
return "", err
}
if err := os.Rename(p+".tmp", p); err != nil {
return "", err
}
return "", nil
})
if err != nil {
return "", err
}
return p, nil
}

68
vendor/github.com/moby/buildkit/executor/oci/mounts.go generated vendored Normal file
View file

@ -0,0 +1,68 @@
package oci
import (
"context"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// MountOpts sets oci spec specific info for mount points
type MountOpts func([]specs.Mount) []specs.Mount
// GetMounts returns the default mounts required by buildkit
// https://github.com/moby/buildkit/issues/429
func GetMounts(ctx context.Context, mountOpts ...MountOpts) []specs.Mount {
mounts := []specs.Mount{
{
Destination: "/proc",
Type: "proc",
Source: "proc",
},
{
Destination: "/dev",
Type: "tmpfs",
Source: "tmpfs",
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
},
{
Destination: "/dev/pts",
Type: "devpts",
Source: "devpts",
Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
},
{
Destination: "/dev/shm",
Type: "tmpfs",
Source: "shm",
Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
},
{
Destination: "/dev/mqueue",
Type: "mqueue",
Source: "mqueue",
Options: []string{"nosuid", "noexec", "nodev"},
},
{
Destination: "/sys",
Type: "sysfs",
Source: "sysfs",
Options: []string{"nosuid", "noexec", "nodev", "ro"},
},
}
for _, o := range mountOpts {
mounts = o(mounts)
}
return mounts
}
func withROBind(src, dest string) func(m []specs.Mount) []specs.Mount {
return func(m []specs.Mount) []specs.Mount {
m = append(m, specs.Mount{
Destination: dest,
Type: "bind",
Source: src,
Options: []string{"rbind", "ro"},
})
return m
}
}
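MountOpts hooks let callers extend or rewrite the defaults the same way withROBind does; a hypothetical option appending a tmpfs (imports as in the file above):

func withTmpfs(dest string) oci.MountOpts {
    return func(m []specs.Mount) []specs.Mount {
        return append(m, specs.Mount{
            Destination: dest,
            Type:        "tmpfs",
            Source:      "tmpfs",
            Options:     []string{"nosuid", "nodev"},
        })
    }
}
// usage: mounts := oci.GetMounts(ctx, withTmpfs("/run"))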

81
vendor/github.com/moby/buildkit/executor/oci/resolvconf.go generated vendored Normal file
View file

@ -0,0 +1,81 @@
package oci
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/libnetwork/resolvconf"
"github.com/moby/buildkit/util/flightcontrol"
)
var g flightcontrol.Group
var notFirstRun bool
var lastNotEmpty bool
func GetResolvConf(ctx context.Context, stateDir string) (string, error) {
p := filepath.Join(stateDir, "resolv.conf")
_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
generate := !notFirstRun
notFirstRun = true
if !generate {
fi, err := os.Stat(p)
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
generate = true
}
if !generate {
fiMain, err := os.Stat("/etc/resolv.conf")
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
if lastNotEmpty {
generate = true
lastNotEmpty = false
}
} else {
if fi.ModTime().Before(fiMain.ModTime()) {
generate = true
}
}
}
}
if !generate {
return "", nil
}
var dt []byte
f, err := resolvconf.Get()
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
} else {
dt = f.Content
}
f, err = resolvconf.FilterResolvDNS(dt, true)
if err != nil {
return "", err
}
if err := ioutil.WriteFile(p+".tmp", f.Content, 0644); err != nil {
return "", err
}
if err := os.Rename(p+".tmp", p); err != nil {
return "", err
}
return "", nil
})
if err != nil {
return "", err
}
return p, nil
}

163
vendor/github.com/moby/buildkit/executor/oci/spec_unix.go generated vendored Normal file
View file

@ -0,0 +1,163 @@
// +build !windows
package oci
import (
"context"
"path"
"sync"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/oci"
"github.com/mitchellh/hashstructure"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/snapshot"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// Ideally we don't have to import whole containerd just for the default spec
// GenerateSpec generates spec using containerd functionality.
func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
c := &containers.Container{
ID: id,
}
_, ok := namespaces.Namespace(ctx)
if !ok {
ctx = namespaces.WithNamespace(ctx, "buildkit")
}
opts = append(opts,
oci.WithHostNamespace(specs.NetworkNamespace),
)
// Note that containerd.GenerateSpec is namespaced so as to make
// specs.Linux.CgroupsPath namespaced
s, err := oci.GenerateSpec(ctx, nil, c, opts...)
if err != nil {
return nil, nil, err
}
s.Process.Args = meta.Args
s.Process.Env = meta.Env
s.Process.Cwd = meta.Cwd
s.Mounts = GetMounts(ctx,
withROBind(resolvConf, "/etc/resolv.conf"),
withROBind(hostsFile, "/etc/hosts"),
)
// TODO: User
sm := &submounts{}
var releasers []func() error
releaseAll := func() {
sm.cleanup()
for _, f := range releasers {
f()
}
}
for _, m := range mounts {
if m.Src == nil {
return nil, nil, errors.Errorf("mount %s has no source", m.Dest)
}
mountable, err := m.Src.Mount(ctx, m.Readonly)
if err != nil {
releaseAll()
return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest)
}
mounts, err := mountable.Mount()
if err != nil {
releaseAll()
return nil, nil, errors.WithStack(err)
}
releasers = append(releasers, mountable.Release)
for _, mount := range mounts {
mount, err = sm.subMount(mount, m.Selector)
if err != nil {
releaseAll()
return nil, nil, err
}
s.Mounts = append(s.Mounts, specs.Mount{
Destination: m.Dest,
Type: mount.Type,
Source: mount.Source,
Options: mount.Options,
})
}
}
return s, releaseAll, nil
}
type mountRef struct {
mount mount.Mount
unmount func() error
}
type submounts struct {
m map[uint64]mountRef
}
func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) {
if path.Join("/", subPath) == "/" {
return m, nil
}
if s.m == nil {
s.m = map[uint64]mountRef{}
}
h, err := hashstructure.Hash(m, nil)
if err != nil {
return mount.Mount{}, err
}
if mr, ok := s.m[h]; ok {
return sub(mr.mount, subPath), nil
}
lm := snapshot.LocalMounterWithMounts([]mount.Mount{m})
mp, err := lm.Mount()
if err != nil {
return mount.Mount{}, err
}
opts := []string{"rbind"}
for _, opt := range m.Options {
if opt == "ro" {
opts = append(opts, opt)
}
}
s.m[h] = mountRef{
mount: mount.Mount{
Source: mp,
Type: "bind",
Options: opts,
},
unmount: lm.Unmount,
}
return sub(s.m[h].mount, subPath), nil
}
func (s *submounts) cleanup() {
var wg sync.WaitGroup
wg.Add(len(s.m))
for _, m := range s.m {
func(m mountRef) {
go func() {
m.unmount()
wg.Done()
}()
}(m)
}
wg.Wait()
}
func sub(m mount.Mount, subPath string) mount.Mount {
m.Source = path.Join(m.Source, subPath)
return m
}

86
vendor/github.com/moby/buildkit/executor/oci/user.go generated vendored Normal file
View file

@ -0,0 +1,86 @@
package oci
import (
"context"
"os"
"strconv"
"strings"
"github.com/containerd/continuity/fs"
"github.com/opencontainers/runc/libcontainer/user"
)
func GetUser(ctx context.Context, root, username string) (uint32, uint32, error) {
// fast path from uid/gid
if uid, gid, err := ParseUser(username); err == nil {
return uid, gid, nil
}
passwdPath, err := user.GetPasswdPath()
if err != nil {
return 0, 0, err
}
groupPath, err := user.GetGroupPath()
if err != nil {
return 0, 0, err
}
passwdFile, err := openUserFile(root, passwdPath)
if err == nil {
defer passwdFile.Close()
}
groupFile, err := openUserFile(root, groupPath)
if err == nil {
defer groupFile.Close()
}
execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
if err != nil {
return 0, 0, err
}
return uint32(execUser.Uid), uint32(execUser.Gid), nil
}
func ParseUser(str string) (uid uint32, gid uint32, err error) {
if str == "" {
return 0, 0, nil
}
parts := strings.SplitN(str, ":", 2)
for i, v := range parts {
switch i {
case 0:
uid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
if len(parts) == 1 {
gid = uid
}
case 1:
gid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
}
}
return
}
func openUserFile(root, p string) (*os.File, error) {
p, err := fs.RootPath(root, p)
if err != nil {
return nil, err
}
return os.Open(p)
}
func parseUID(str string) (uint32, error) {
if str == "root" {
return 0, nil
}
uid, err := strconv.ParseUint(str, 10, 32)
if err != nil {
return 0, err
}
return uint32(uid), nil
}
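Per the code above, ParseUser accepts numeric uid[:gid] forms (with "root" special-cased to 0) before GetUser falls back to the rootfs's passwd and group files:

uid, gid, _ := oci.ParseUser("1000")     // 1000, 1000 (gid defaults to uid)
uid, gid, _ = oci.ParseUser("1000:2000") // 1000, 2000
uid, gid, _ = oci.ParseUser("root")      // 0, 0
fmt.Println(uid, gid)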

249
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go generated vendored Normal file
View file

@ -0,0 +1,249 @@
package runcexecutor
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/containerd/containerd/contrib/seccomp"
"github.com/containerd/containerd/mount"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
runc "github.com/containerd/go-runc"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/libcontainer_specconv"
"github.com/moby/buildkit/util/system"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type Opt struct {
// root directory
Root string
CommandCandidates []string
// without root privileges (has nothing to do with Opt.Root directory)
Rootless bool
}
var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
type runcExecutor struct {
runc *runc.Runc
root string
cmd string
rootless bool
}
func New(opt Opt) (executor.Executor, error) {
cmds := opt.CommandCandidates
if cmds == nil {
cmds = defaultCommandCandidates
}
var cmd string
var found bool
for _, cmd = range cmds {
if _, err := exec.LookPath(cmd); err == nil {
found = true
break
}
}
if !found {
return nil, errors.Errorf("failed to find %s binary", cmd)
}
root := opt.Root
if err := os.MkdirAll(root, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create %s", root)
}
root, err := filepath.Abs(root)
if err != nil {
return nil, err
}
root, err = filepath.EvalSymlinks(root)
if err != nil {
return nil, err
}
runtime := &runc.Runc{
Command: cmd,
Log: filepath.Join(root, "runc-log.json"),
LogFormat: runc.JSON,
PdeathSignal: syscall.SIGKILL,
Setpgid: true,
}
w := &runcExecutor{
runc: runtime,
root: root,
rootless: opt.Rootless,
}
return w, nil
}
func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
resolvConf, err := oci.GetResolvConf(ctx, w.root)
if err != nil {
return err
}
hostsFile, err := oci.GetHostsFile(ctx, w.root)
if err != nil {
return err
}
mountable, err := root.Mount(ctx, false)
if err != nil {
return err
}
rootMount, err := mountable.Mount()
if err != nil {
return err
}
defer mountable.Release()
id := identity.NewID()
bundle := filepath.Join(w.root, id)
if err := os.Mkdir(bundle, 0700); err != nil {
return err
}
defer os.RemoveAll(bundle)
rootFSPath := filepath.Join(bundle, "rootfs")
if err := os.Mkdir(rootFSPath, 0700); err != nil {
return err
}
if err := mount.All(rootMount, rootFSPath); err != nil {
return err
}
defer mount.Unmount(rootFSPath, 0)
uid, gid, err := oci.GetUser(ctx, rootFSPath, meta.User)
if err != nil {
return err
}
f, err := os.Create(filepath.Join(bundle, "config.json"))
if err != nil {
return err
}
defer f.Close()
opts := []containerdoci.SpecOpts{containerdoci.WithUIDGID(uid, gid)}
if system.SeccompSupported() {
opts = append(opts, seccomp.WithDefaultProfile())
}
if meta.ReadonlyRootFS {
opts = append(opts, containerdoci.WithRootFSReadonly())
}
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, opts...)
if err != nil {
return err
}
defer cleanup()
spec.Root.Path = rootFSPath
if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type
spec.Root.Readonly = true
}
newp, err := fs.RootPath(rootFSPath, meta.Cwd)
if err != nil {
return errors.Wrapf(err, "working dir %s points to invalid target", newp)
}
if err := os.MkdirAll(newp, 0700); err != nil {
return errors.Wrapf(err, "failed to create working directory %s", newp)
}
if w.rootless {
specconv.ToRootless(spec, &specconv.RootlessOpts{
MapSubUIDGID: true,
})
// TODO(AkihiroSuda): keep Cgroups enabled if /sys/fs/cgroup/cpuset/buildkit exists and writable
spec.Linux.CgroupsPath = ""
// TODO(AkihiroSuda): ToRootless removes netns, but we should re-add netns here
// if either SUID or userspace NAT is configured on the host.
if err := setOOMScoreAdj(spec); err != nil {
return err
}
}
if err := json.NewEncoder(f).Encode(spec); err != nil {
return err
}
logrus.Debugf("> running %s %v", id, meta.Args)
status, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{
IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr},
})
logrus.Debugf("< completed %s %v %v", id, status, err)
if status != 0 {
select {
case <-ctx.Done():
// runc can't report context.Cancelled directly
return errors.Wrapf(ctx.Err(), "exit code %d", status)
default:
}
return errors.Errorf("exit code %d", status)
}
return err
}
type forwardIO struct {
stdin io.ReadCloser
stdout, stderr io.WriteCloser
}
func (s *forwardIO) Close() error {
return nil
}
func (s *forwardIO) Set(cmd *exec.Cmd) {
cmd.Stdin = s.stdin
cmd.Stdout = s.stdout
cmd.Stderr = s.stderr
}
func (s *forwardIO) Stdin() io.WriteCloser {
return nil
}
func (s *forwardIO) Stdout() io.ReadCloser {
return nil
}
func (s *forwardIO) Stderr() io.ReadCloser {
return nil
}
// setOOMScoreAdj comes from https://github.com/genuinetools/img/blob/2fabe60b7dc4623aa392b515e013bbc69ad510ab/executor/runc/executor.go#L182-L192
func setOOMScoreAdj(spec *specs.Spec) error {
// Set the oom_score_adj of our children containers to that of the current process.
b, err := ioutil.ReadFile("/proc/self/oom_score_adj")
if err != nil {
return errors.Wrap(err, "failed to read /proc/self/oom_score_adj")
}
s := strings.TrimSpace(string(b))
oom, err := strconv.Atoi(s)
if err != nil {
return errors.Wrapf(err, "failed to parse %s as int", s)
}
spec.Process.OOMScoreAdj = &oom
return nil
}

vendor/github.com/moby/buildkit/exporter/exporter.go generated vendored Normal file

@ -0,0 +1,16 @@
package exporter
import (
"context"
"github.com/moby/buildkit/cache"
)
type Exporter interface {
Resolve(context.Context, map[string]string) (ExporterInstance, error)
}
type ExporterInstance interface {
Name() string
Export(context.Context, cache.ImmutableRef, map[string][]byte) (map[string]string, error)
}


@ -0,0 +1,276 @@
package builder
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"regexp"
"strings"
"github.com/docker/docker/builder/dockerignore"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const (
LocalNameContext = "context"
LocalNameDockerfile = "dockerfile"
keyTarget = "target"
keyFilename = "filename"
keyCacheFrom = "cache-from"
exporterImageConfig = "containerimage.config"
defaultDockerfileName = "Dockerfile"
dockerignoreFilename = ".dockerignore"
buildArgPrefix = "build-arg:"
labelPrefix = "label:"
keyNoCache = "no-cache"
)
var httpPrefix = regexp.MustCompile("^https?://")
var gitUrlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
func Build(ctx context.Context, c client.Client) error {
opts := c.Opts()
filename := opts[keyFilename]
if filename == "" {
filename = defaultDockerfileName
}
var ignoreCache []string
if v, ok := opts[keyNoCache]; ok {
if v == "" {
ignoreCache = []string{} // means all stages
} else {
ignoreCache = strings.Split(v, ",")
}
}
src := llb.Local(LocalNameDockerfile,
llb.IncludePatterns([]string{filename}),
llb.SessionID(c.SessionID()),
llb.SharedKeyHint(defaultDockerfileName),
)
var buildContext *llb.State
isScratchContext := false
if st, ok := detectGitContext(opts[LocalNameContext]); ok {
src = *st
buildContext = &src
} else if httpPrefix.MatchString(opts[LocalNameContext]) {
httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"))
def, err := httpContext.Marshal()
if err != nil {
return err
}
ref, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dt, err := ref.ReadFile(ctx, client.ReadRequest{
Filename: "context",
Range: &client.FileRange{
Length: 1024,
},
})
if err != nil {
return err
}
if isArchive(dt) {
unpack := llb.Image(dockerfile2llb.CopyImage).
Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS())
unpack.AddMount("/src", httpContext, llb.Readonly)
src = unpack.AddMount("/out", llb.Scratch())
buildContext = &src
} else {
filename = "context"
src = httpContext
buildContext = &src
isScratchContext = true
}
}
def, err := src.Marshal()
if err != nil {
return err
}
eg, ctx2 := errgroup.WithContext(ctx)
var dtDockerfile []byte
eg.Go(func() error {
ref, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{
Filename: filename,
})
if err != nil {
return err
}
return nil
})
var excludes []string
if !isScratchContext {
eg.Go(func() error {
dockerignoreState := buildContext
if dockerignoreState == nil {
st := llb.Local(LocalNameContext,
llb.SessionID(c.SessionID()),
llb.IncludePatterns([]string{dockerignoreFilename}),
llb.SharedKeyHint(dockerignoreFilename),
)
dockerignoreState = &st
}
def, err := dockerignoreState.Marshal()
if err != nil {
return err
}
ref, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
}, nil, false)
if err != nil {
return err
}
dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{
Filename: dockerignoreFilename,
})
if err == nil {
excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore))
if err != nil {
return errors.Wrap(err, "failed to parse dockerignore")
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
if _, ok := c.Opts()["cmdline"]; !ok {
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
if ok {
return forwardGateway(ctx, c, ref, cmdline)
}
}
st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
Target: opts[keyTarget],
MetaResolver: c,
BuildArgs: filter(opts, buildArgPrefix),
Labels: filter(opts, labelPrefix),
SessionID: c.SessionID(),
BuildContext: buildContext,
Excludes: excludes,
IgnoreCache: ignoreCache,
})
if err != nil {
return err
}
def, err = st.Marshal()
if err != nil {
return err
}
config, err := json.Marshal(img)
if err != nil {
return err
}
var cacheFrom []string
if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" {
cacheFrom = strings.Split(cacheFromStr, ",")
}
_, err = c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
ImportCacheRefs: cacheFrom,
}, map[string][]byte{
exporterImageConfig: config,
}, true)
if err != nil {
return err
}
return nil
}
func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) error {
opts := c.Opts()
if opts == nil {
opts = map[string]string{}
}
opts["cmdline"] = cmdline
opts["source"] = ref
_, err := c.Solve(ctx, client.SolveRequest{
Frontend: "gateway.v0",
FrontendOpt: opts,
}, nil, true)
return err
}
func filter(opt map[string]string, key string) map[string]string {
m := map[string]string{}
for k, v := range opt {
if strings.HasPrefix(k, key) {
m[strings.TrimPrefix(k, key)] = v
}
}
return m
}
func detectGitContext(ref string) (*llb.State, bool) {
found := false
if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) {
found = true
}
for _, prefix := range []string{"git://", "github.com/", "git@"} {
if strings.HasPrefix(ref, prefix) {
found = true
break
}
}
if !found {
return nil, false
}
parts := strings.SplitN(ref, "#", 2)
branch := ""
if len(parts) > 1 {
branch = parts[1]
}
st := llb.Git(parts[0], branch)
return &st, true
}
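// Illustrative examples (not from the vendored source) of the split above:
// the fragment after '#' selects the branch or tag.
//
//	detectGitContext("git://github.com/moby/buildkit")            // llb.Git(..., ""), true
//	detectGitContext("https://github.com/moby/buildkit.git#v0.1") // llb.Git(..., "v0.1"), true
//	detectGitContext("https://example.com/ctx.tar.gz")            // nil, false: https but no .git suffix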
func isArchive(header []byte) bool {
for _, m := range [][]byte{
{0x42, 0x5A, 0x68}, // bzip2
{0x1F, 0x8B, 0x08}, // gzip
{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
} {
if len(header) < len(m) {
continue
}
if bytes.Equal(m, header[:len(m)]) {
return true
}
}
r := tar.NewReader(bytes.NewBuffer(header))
_, err := r.Next()
return err == nil
}
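A minimal standalone sketch of the magic-byte sniffing above, not part of the vendored file and assuming only the standard library: a gzip stream is recognized by its 1F 8B 08 prefix, while plain text falls through the tar probe.
package main
import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
)
// sniff mirrors isArchive above: match bzip2/gzip/xz magic bytes,
// then fall back to probing for a tar header.
func sniff(header []byte) bool {
	for _, m := range [][]byte{
		{0x42, 0x5A, 0x68},                   // bzip2
		{0x1F, 0x8B, 0x08},                   // gzip
		{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
	} {
		if len(header) >= len(m) && bytes.Equal(m, header[:len(m)]) {
			return true
		}
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}
func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()
	fmt.Println(sniff(buf.Bytes()))            // true: gzip magic 1F 8B 08
	fmt.Println(sniff([]byte("FROM busybox"))) // false: neither magic bytes nor a tar header
}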


@ -0,0 +1,41 @@
package dockerfile
import (
"context"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/solver"
)
func NewDockerfileFrontend() frontend.Frontend {
return &dfFrontend{}
}
type dfFrontend struct{}
func (f *dfFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts)
if err != nil {
return nil, nil, err
}
defer func() {
for _, r := range c.refs {
if r != nil && (c.final != r || retErr != nil) {
r.Release(context.TODO())
}
}
}()
if err := builder.Build(ctx, c); err != nil {
return nil, nil, err
}
if c.final == nil || c.final.CachedResult == nil {
return nil, c.exporterAttr, nil
}
return c.final, c.exporterAttr, nil
}


@ -0,0 +1,932 @@
package dockerfile2llb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const (
emptyImageName = "scratch"
localNameContext = "context"
historyComment = "buildkit.dockerfile.v0"
CopyImage = "tonistiigi/copy:v0.1.3@sha256:87c46e7b413cdd2c2702902b481b390ce263ac9d942253d366f3b1a3c16f96d6"
)
type ConvertOpt struct {
Target string
MetaResolver llb.ImageMetaResolver
BuildArgs map[string]string
Labels map[string]string
SessionID string
BuildContext *llb.State
Excludes []string
// IgnoreCache contains the names of stages that should not use the build cache.
// An empty, non-nil slice disables the cache for all stages; nil leaves caching enabled.
IgnoreCache []string
// CacheIDNamespace scopes the IDs for different cache mounts
CacheIDNamespace string
}
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) {
if len(dt) == 0 {
return nil, nil, errors.Errorf("the Dockerfile cannot be empty")
}
dockerfile, err := parser.Parse(bytes.NewReader(dt))
if err != nil {
return nil, nil, err
}
proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs)
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
if err != nil {
return nil, nil, err
}
for i := range metaArgs {
metaArgs[i] = setBuildArgValue(metaArgs[i], opt.BuildArgs)
}
shlex := shell.NewLex(dockerfile.EscapeToken)
metaResolver := opt.MetaResolver
if metaResolver == nil {
metaResolver = imagemetaresolver.Default()
}
var allDispatchStates []*dispatchState
dispatchStatesByName := map[string]*dispatchState{}
// set base state for every image
for _, st := range stages {
name, err := shlex.ProcessWord(st.BaseName, toEnvList(metaArgs, nil))
if err != nil {
return nil, nil, err
}
st.BaseName = name
ds := &dispatchState{
stage: st,
deps: make(map[*dispatchState]struct{}),
ctxPaths: make(map[string]struct{}),
}
if d, ok := dispatchStatesByName[st.BaseName]; ok {
ds.base = d
}
allDispatchStates = append(allDispatchStates, ds)
if st.Name != "" {
dispatchStatesByName[strings.ToLower(st.Name)] = ds
}
if opt.IgnoreCache != nil {
if len(opt.IgnoreCache) == 0 {
ds.ignoreCache = true
} else if st.Name != "" {
for _, n := range opt.IgnoreCache {
if strings.EqualFold(n, st.Name) {
ds.ignoreCache = true
}
}
}
}
}
var target *dispatchState
if opt.Target == "" {
target = allDispatchStates[len(allDispatchStates)-1]
} else {
var ok bool
target, ok = dispatchStatesByName[strings.ToLower(opt.Target)]
if !ok {
return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target)
}
}
// fill in stage dependencies so that unreachable stages can skip loading image configs
for _, d := range allDispatchStates {
d.commands = make([]command, len(d.stage.Commands))
for i, cmd := range d.stage.Commands {
newCmd, err := toCommand(cmd, dispatchStatesByName, allDispatchStates)
if err != nil {
return nil, nil, err
}
d.commands[i] = newCmd
for _, src := range newCmd.sources {
if src != nil {
d.deps[src] = struct{}{}
if src.unregistered {
allDispatchStates = append(allDispatchStates, src)
}
}
}
}
}
eg, ctx := errgroup.WithContext(ctx)
for i, d := range allDispatchStates {
reachable := isReachable(target, d)
// resolve image config for every stage
if d.base == nil {
if d.stage.BaseName == emptyImageName {
d.state = llb.Scratch()
d.image = emptyImage()
continue
}
func(i int, d *dispatchState) {
eg.Go(func() error {
ref, err := reference.ParseNormalizedNamed(d.stage.BaseName)
if err != nil {
return err
}
d.stage.BaseName = reference.TagNameOnly(ref).String()
var isScratch bool
if metaResolver != nil && reachable {
dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName)
if err == nil { // handle the error while builder is actually running
var img Image
if err := json.Unmarshal(dt, &img); err != nil {
return err
}
img.Created = nil
d.image = img
if dgst != "" {
ref, err = reference.WithDigest(ref, dgst)
if err != nil {
return err
}
}
d.stage.BaseName = ref.String()
_ = ref
if len(img.RootFS.DiffIDs) == 0 {
isScratch = true
}
}
}
if isScratch {
d.state = llb.Scratch()
} else {
d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode))
}
return nil
})
}(i, d)
}
}
if err := eg.Wait(); err != nil {
return nil, nil, err
}
buildContext := &mutableOutput{}
ctxPaths := map[string]struct{}{}
for _, d := range allDispatchStates {
if !isReachable(target, d) {
continue
}
if d.base != nil {
d.state = d.base.state
d.image = clone(d.base.image)
}
// initialize base metadata from image conf
for _, env := range d.image.Config.Env {
parts := strings.SplitN(env, "=", 2)
v := ""
if len(parts) > 1 {
v = parts[1]
}
if err := dispatchEnv(d, &instructions.EnvCommand{Env: []instructions.KeyValuePair{{Key: parts[0], Value: v}}}, false); err != nil {
return nil, nil, err
}
}
if d.image.Config.WorkingDir != "" {
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil {
return nil, nil, err
}
}
if d.image.Config.User != "" {
if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil {
return nil, nil, err
}
}
opt := dispatchOpt{
allDispatchStates: allDispatchStates,
dispatchStatesByName: dispatchStatesByName,
metaArgs: metaArgs,
buildArgValues: opt.BuildArgs,
shlex: shlex,
sessionID: opt.SessionID,
buildContext: llb.NewState(buildContext),
proxyEnv: proxyEnv,
cacheIDNamespace: opt.CacheIDNamespace,
}
if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil {
return nil, nil, err
}
for _, cmd := range d.commands {
if err := dispatch(d, cmd, opt); err != nil {
return nil, nil, err
}
}
for p := range d.ctxPaths {
ctxPaths[p] = struct{}{}
}
}
if len(opt.Labels) != 0 && target.image.Config.Labels == nil {
target.image.Config.Labels = make(map[string]string, len(opt.Labels))
}
for k, v := range opt.Labels {
target.image.Config.Labels[k] = v
}
opts := []llb.LocalOption{
llb.SessionID(opt.SessionID),
llb.ExcludePatterns(opt.Excludes),
llb.SharedKeyHint(localNameContext),
}
if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil {
opts = append(opts, llb.FollowPaths(includePatterns))
}
bc := llb.Local(localNameContext, opts...)
if opt.BuildContext != nil {
bc = *opt.BuildContext
}
buildContext.Output = bc.Output()
return &target.state, &target.image, nil
}
func toCommand(ic instructions.Command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) (command, error) {
cmd := command{Command: ic}
if c, ok := ic.(*instructions.CopyCommand); ok {
if c.From != "" {
var stn *dispatchState
index, err := strconv.Atoi(c.From)
if err != nil {
stn, ok = dispatchStatesByName[strings.ToLower(c.From)]
if !ok {
stn = &dispatchState{
stage: instructions.Stage{BaseName: c.From},
deps: make(map[*dispatchState]struct{}),
unregistered: true,
}
}
} else {
if index < 0 || index >= len(allDispatchStates) {
return command{}, errors.Errorf("invalid stage index %d", index)
}
stn = allDispatchStates[index]
}
cmd.sources = []*dispatchState{stn}
}
}
if ok := detectRunMount(&cmd, dispatchStatesByName, allDispatchStates); ok {
return cmd, nil
}
return cmd, nil
}
type dispatchOpt struct {
allDispatchStates []*dispatchState
dispatchStatesByName map[string]*dispatchState
metaArgs []instructions.ArgCommand
buildArgValues map[string]string
shlex *shell.Lex
sessionID string
buildContext llb.State
proxyEnv *llb.ProxyEnv
cacheIDNamespace string
}
func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok {
err := ex.Expand(func(word string) (string, error) {
return opt.shlex.ProcessWord(word, toEnvList(d.buildArgs, d.image.Config.Env))
})
if err != nil {
return err
}
}
var err error
switch c := cmd.Command.(type) {
case *instructions.MaintainerCommand:
err = dispatchMaintainer(d, c)
case *instructions.EnvCommand:
err = dispatchEnv(d, c, true)
case *instructions.RunCommand:
err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt)
case *instructions.WorkdirCommand:
err = dispatchWorkdir(d, c, true)
case *instructions.AddCommand:
err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "")
if err == nil {
for _, src := range c.Sources() {
d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
}
}
case *instructions.LabelCommand:
err = dispatchLabel(d, c)
case *instructions.OnbuildCommand:
err = dispatchOnbuild(d, c)
case *instructions.CmdCommand:
err = dispatchCmd(d, c)
case *instructions.EntrypointCommand:
err = dispatchEntrypoint(d, c)
case *instructions.HealthCheckCommand:
err = dispatchHealthcheck(d, c)
case *instructions.ExposeCommand:
err = dispatchExpose(d, c, opt.shlex)
case *instructions.UserCommand:
err = dispatchUser(d, c, true)
case *instructions.VolumeCommand:
err = dispatchVolume(d, c)
case *instructions.StopSignalCommand:
err = dispatchStopSignal(d, c)
case *instructions.ShellCommand:
err = dispatchShell(d, c)
case *instructions.ArgCommand:
err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues)
case *instructions.CopyCommand:
l := opt.buildContext
if len(cmd.sources) != 0 {
l = cmd.sources[0].state
}
err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown)
if err == nil && len(cmd.sources) == 0 {
for _, src := range c.Sources() {
d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
}
}
default:
}
return err
}
type dispatchState struct {
state llb.State
image Image
stage instructions.Stage
base *dispatchState
deps map[*dispatchState]struct{}
buildArgs []instructions.ArgCommand
commands []command
ctxPaths map[string]struct{}
ignoreCache bool
cmdSet bool
unregistered bool
}
type command struct {
instructions.Command
sources []*dispatchState
}
func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error {
for _, trigger := range triggers {
ast, err := parser.Parse(strings.NewReader(trigger))
if err != nil {
return err
}
if len(ast.AST.Children) != 1 {
return errors.New("onbuild trigger should be a single expression")
}
ic, err := instructions.ParseCommand(ast.AST.Children[0])
if err != nil {
return err
}
cmd, err := toCommand(ic, opt.dispatchStatesByName, opt.allDispatchStates)
if err != nil {
return err
}
if err := dispatch(d, cmd, opt); err != nil {
return err
}
}
return nil
}
func dispatchEnv(d *dispatchState, c *instructions.EnvCommand, commit bool) error {
commitMessage := bytes.NewBufferString("ENV")
for _, e := range c.Env {
commitMessage.WriteString(" " + e.String())
d.state = d.state.AddEnv(e.Key, e.Value)
d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value, true)
}
if commit {
return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
return nil
}
func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
var args []string = c.CmdLine
if c.PrependShell {
args = withShell(d.image, args)
} else if d.image.Config.Entrypoint != nil {
args = append(d.image.Config.Entrypoint, args...)
}
opt := []llb.RunOption{llb.Args(args)}
for _, arg := range d.buildArgs {
opt = append(opt, llb.AddEnv(arg.Key, getArgValue(arg)))
}
opt = append(opt, dfCmd(c))
if d.ignoreCache {
opt = append(opt, llb.IgnoreCache)
}
if proxy != nil {
opt = append(opt, llb.WithProxy(*proxy))
}
opt = append(opt, dispatchRunMounts(d, c, sources, dopt)...)
d.state = d.state.Run(opt...).Root()
return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
}
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error {
d.state = d.state.Dir(c.Path)
wd := c.Path
if !path.IsAbs(c.Path) {
wd = path.Join("/", d.image.Config.WorkingDir, wd)
}
d.image.Config.WorkingDir = wd
if commit {
return commitToHistory(&d.image, "WORKDIR "+wd, false, nil)
}
return nil
}
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint interface{}, chown string) error {
// TODO: this should use CopyOp instead. Current implementation is inefficient
img := llb.Image(CopyImage)
dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest()))
if c.Dest() == "." || c.Dest()[len(c.Dest())-1] == filepath.Separator {
dest += string(filepath.Separator)
}
args := []string{"copy"}
unpack := isAddCommand
mounts := make([]llb.RunOption, 0, len(c.Sources()))
if chown != "" {
args = append(args, fmt.Sprintf("--chown=%s", chown))
_, _, err := parseUser(chown)
if err != nil {
mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly))
mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly))
}
}
commitMessage := bytes.NewBufferString("")
if isAddCommand {
commitMessage.WriteString("ADD")
} else {
commitMessage.WriteString("COPY")
}
for i, src := range c.Sources() {
commitMessage.WriteString(" " + src)
if isAddCommand && (strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://")) {
// Resources from remote URLs are not decompressed.
// https://docs.docker.com/engine/reference/builder/#add
//
// Note: mixing up remote archives and local archives in a single ADD instruction
// would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717
unpack = false
u, err := url.Parse(src)
f := "__unnamed__"
if err == nil {
if base := path.Base(u.Path); base != "." && base != "/" {
f = base
}
}
target := path.Join(fmt.Sprintf("/src-%d", i), f)
args = append(args, target)
mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly))
} else {
d, f := splitWildcards(src)
targetCmd := fmt.Sprintf("/src-%d", i)
targetMount := targetCmd
if f == "" {
f = path.Base(src)
targetMount = path.Join(targetMount, f)
}
targetCmd = path.Join(targetCmd, f)
args = append(args, targetCmd)
mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly))
}
}
commitMessage.WriteString(" " + c.Dest())
args = append(args, dest)
if unpack {
args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...)
}
opt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint)}
if d.ignoreCache {
opt = append(opt, llb.IgnoreCache)
}
run := img.Run(append(opt, mounts...)...)
d.state = run.AddMount("/dest", d.state)
return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
}
func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error {
d.image.Author = c.Maintainer
return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil)
}
func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error {
commitMessage := bytes.NewBufferString("LABEL")
if d.image.Config.Labels == nil {
d.image.Config.Labels = make(map[string]string, len(c.Labels))
}
for _, v := range c.Labels {
d.image.Config.Labels[v.Key] = v.Value
commitMessage.WriteString(" " + v.String())
}
return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error {
d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression)
return nil
}
func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error {
var args []string = c.CmdLine
if c.PrependShell {
args = withShell(d.image, args)
}
d.image.Config.Cmd = args
d.image.Config.ArgsEscaped = true
d.cmdSet = true
return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil)
}
func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error {
var args []string = c.CmdLine
if c.PrependShell {
args = withShell(d.image, args)
}
d.image.Config.Entrypoint = args
if !d.cmdSet {
d.image.Config.Cmd = nil
}
return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil)
}
func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error {
d.image.Config.Healthcheck = &HealthConfig{
Test: c.Health.Test,
Interval: c.Health.Interval,
Timeout: c.Health.Timeout,
StartPeriod: c.Health.StartPeriod,
Retries: c.Health.Retries,
}
return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil)
}
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
ports := []string{}
for _, p := range c.Ports {
ps, err := shlex.ProcessWords(p, toEnvList(d.buildArgs, d.image.Config.Env))
if err != nil {
return err
}
ports = append(ports, ps...)
}
c.Ports = ports
ps, _, err := nat.ParsePortSpecs(c.Ports)
if err != nil {
return err
}
if d.image.Config.ExposedPorts == nil {
d.image.Config.ExposedPorts = make(map[string]struct{})
}
for p := range ps {
d.image.Config.ExposedPorts[string(p)] = struct{}{}
}
return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil)
}
func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error {
d.state = d.state.User(c.User)
d.image.Config.User = c.User
if commit {
return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil)
}
return nil
}
func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error {
if d.image.Config.Volumes == nil {
d.image.Config.Volumes = map[string]struct{}{}
}
for _, v := range c.Volumes {
if v == "" {
return errors.New("VOLUME specified can not be an empty string")
}
d.image.Config.Volumes[v] = struct{}{}
}
return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil)
}
func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error {
if _, err := signal.ParseSignal(c.Signal); err != nil {
return err
}
d.image.Config.StopSignal = c.Signal
return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil)
}
func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error {
d.image.Config.Shell = c.Shell
return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
}
func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.ArgCommand, buildArgValues map[string]string) error {
commitStr := "ARG " + c.Key
if c.Value != nil {
commitStr += "=" + *c.Value
}
if c.Value == nil {
for _, ma := range metaArgs {
if ma.Key == c.Key {
c.Value = ma.Value
}
}
}
d.buildArgs = append(d.buildArgs, setBuildArgValue(*c, buildArgValues))
return commitToHistory(&d.image, commitStr, false, nil)
}
func pathRelativeToWorkingDir(s llb.State, p string) string {
if path.IsAbs(p) {
return p
}
return path.Join(s.GetDir(), p)
}
func splitWildcards(name string) (string, string) {
i := 0
for ; i < len(name); i++ {
ch := name[i]
if ch == '\\' {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
break
}
}
if i == len(name) {
return name, ""
}
base := path.Base(name[:i])
if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) {
base = ""
}
return path.Dir(name[:i]), base + name[i:]
}
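// Worked example (illustrative, not from the vendored source):
// splitWildcards("src/*.go") returns ("src", "*.go"), so the mount points at
// the directory while the copy command keeps the glob;
// splitWildcards("src/main.go") contains no wildcard and returns
// ("src/main.go", "").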
func addEnv(env []string, k, v string, override bool) []string {
gotOne := false
for i, envVar := range env {
envParts := strings.SplitN(envVar, "=", 2)
compareFrom := envParts[0]
if shell.EqualEnvKeys(compareFrom, k) {
if override {
env[i] = k + "=" + v
}
gotOne = true
break
}
}
if !gotOne {
env = append(env, k+"="+v)
}
return env
}
func setBuildArgValue(c instructions.ArgCommand, values map[string]string) instructions.ArgCommand {
if v, ok := values[c.Key]; ok {
c.Value = &v
}
return c
}
func toEnvList(args []instructions.ArgCommand, env []string) []string {
for _, arg := range args {
env = addEnv(env, arg.Key, getArgValue(arg), false)
}
return env
}
func getArgValue(arg instructions.ArgCommand) string {
v := ""
if arg.Value != nil {
v = *arg.Value
}
return v
}
func dfCmd(cmd interface{}) llb.MetadataOpt {
// TODO: add fmt.Stringer to instructions.Command to remove interface{}
var cmdStr string
if cmd, ok := cmd.(fmt.Stringer); ok {
cmdStr = cmd.String()
}
if cmd, ok := cmd.(string); ok {
cmdStr = cmd
}
return llb.WithDescription(map[string]string{
"com.docker.dockerfile.v1.command": cmdStr,
})
}
func runCommandString(args []string, buildArgs []instructions.ArgCommand) string {
var tmpBuildEnv []string
for _, arg := range buildArgs {
tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+getArgValue(arg))
}
if len(tmpBuildEnv) > 0 {
tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
}
return strings.Join(append(tmpBuildEnv, args...), " ")
}
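// Worked example (illustrative, not from the vendored source): with
// buildArgs [FOO=bar] and args ["go", "build"], the history entry reads
// "|1 FOO=bar go build"; the "|N" prefix records how many build-arg pairs
// precede the actual command.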
func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error {
if st != nil {
msg += " # buildkit"
}
img.History = append(img.History, ocispec.History{
CreatedBy: msg,
Comment: historyComment,
EmptyLayer: !withLayer,
})
return nil
}
func isReachable(from, to *dispatchState) (ret bool) {
if from == nil {
return false
}
if from == to || isReachable(from.base, to) {
return true
}
for d := range from.deps {
if isReachable(d, to) {
return true
}
}
return false
}
func parseUser(str string) (uid uint32, gid uint32, err error) {
if str == "" {
return 0, 0, nil
}
parts := strings.SplitN(str, ":", 2)
for i, v := range parts {
switch i {
case 0:
uid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
if len(parts) == 1 {
gid = uid
}
case 1:
gid, err = parseUID(v)
if err != nil {
return 0, 0, err
}
}
}
return
}
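// Worked example (illustrative, not from the vendored source):
// parseUser("1000") yields uid=1000, gid=1000, since gid defaults to uid
// when only one field is given; parseUser("root:1001") yields 0, 1001
// because "root" is special-cased to uid 0 without any passwd lookup.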
func parseUID(str string) (uint32, error) {
if str == "root" {
return 0, nil
}
uid, err := strconv.ParseUint(str, 10, 32)
if err != nil {
return 0, err
}
return uint32(uid), nil
}
func normalizeContextPaths(paths map[string]struct{}) []string {
pathSlice := make([]string, 0, len(paths))
for p := range paths {
if p == "/" {
return nil
}
pathSlice = append(pathSlice, p)
}
toDelete := map[string]struct{}{}
for i := range pathSlice {
for j := range pathSlice {
if i == j {
continue
}
if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") {
delete(paths, pathSlice[j])
}
}
}
toSort := make([]string, 0, len(paths))
for p := range paths {
if _, ok := toDelete[p]; !ok {
toSort = append(toSort, path.Join(".", p))
}
}
sort.Slice(toSort, func(i, j int) bool {
return toSort[i] < toSort[j]
})
return toSort
}
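// Illustrative examples (not from the vendored source) of the pruning above:
//
//	normalizeContextPaths(map[string]struct{}{"/a": {}, "/a/b": {}, "/c": {}})
//	// -> ["a", "c"]: "/a/b" is dropped because it is nested under "/a"
//	normalizeContextPaths(map[string]struct{}{"/": {}})
//	// -> nil: requesting the root disables the follow-paths hint entirely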
func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {
pe := &llb.ProxyEnv{}
isNil := true
for k, v := range args {
if strings.EqualFold(k, "http_proxy") {
pe.HttpProxy = v
isNil = false
}
if strings.EqualFold(k, "https_proxy") {
pe.HttpsProxy = v
isNil = false
}
if strings.EqualFold(k, "ftp_proxy") {
pe.FtpProxy = v
isNil = false
}
if strings.EqualFold(k, "no_proxy") {
pe.NoProxy = v
isNil = false
}
}
if isNil {
return nil
}
return pe
}
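// Note (illustrative, not from the vendored source): the matching is
// case-insensitive, so a build arg passed as HTTP_PROXY=http://proxy:3128
// or http_proxy=... both populate HttpProxy; when none of the four proxy
// keys are present the function returns nil and dispatchRun injects no
// proxy environment.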
type mutableOutput struct {
llb.Output
}
func withShell(img Image, args []string) []string {
var shell []string
if len(img.Config.Shell) > 0 {
shell = append([]string{}, img.Config.Shell...)
} else {
shell = defaultShell()
}
return append(shell, strings.Join(args, " "))
}


@ -0,0 +1,16 @@
// +build !dfrunmount,!dfextall
package dockerfile2llb
import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)
func detectRunMount(cmd *command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) bool {
return false
}
func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) []llb.RunOption {
return nil
}


@ -0,0 +1,74 @@
// +build dfrunmount dfextall
package dockerfile2llb
import (
"path"
"path/filepath"
"strings"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)
func detectRunMount(cmd *command, dispatchStatesByName map[string]*dispatchState, allDispatchStates []*dispatchState) bool {
if c, ok := cmd.Command.(*instructions.RunCommand); ok {
mounts := instructions.GetMounts(c)
sources := make([]*dispatchState, len(mounts))
for i, mount := range mounts {
if mount.From == "" && mount.Type == instructions.MountTypeCache {
mount.From = emptyImageName
}
from := mount.From
if from == "" || mount.Type == instructions.MountTypeTmpfs {
continue
}
stn, ok := dispatchStatesByName[strings.ToLower(from)]
if !ok {
stn = &dispatchState{
stage: instructions.Stage{BaseName: from},
deps: make(map[*dispatchState]struct{}),
unregistered: true,
}
}
sources[i] = stn
}
cmd.sources = sources
return true
}
return false
}
func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) []llb.RunOption {
var out []llb.RunOption
mounts := instructions.GetMounts(c)
for i, mount := range mounts {
if mount.From == "" && mount.Type == instructions.MountTypeCache {
mount.From = emptyImageName
}
st := opt.buildContext
if mount.From != "" {
st = sources[i].state
}
var mountOpts []llb.MountOption
if mount.Type == instructions.MountTypeTmpfs {
st = llb.Scratch()
mountOpts = append(mountOpts, llb.Tmpfs())
}
if mount.ReadOnly {
mountOpts = append(mountOpts, llb.Readonly)
}
if mount.Type == instructions.MountTypeCache {
mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID))
}
if src := path.Join("/", mount.Source); src != "/" {
mountOpts = append(mountOpts, llb.SourcePath(src))
}
out = append(out, llb.AddMount(path.Join("/", mount.Target), st, mountOpts...))
d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{}
}
return out
}


@ -0,0 +1,7 @@
// +build !windows
package dockerfile2llb
func defaultShell() []string {
return []string{"/bin/sh", "-c"}
}


@ -0,0 +1,7 @@
// +build windows
package dockerfile2llb
func defaultShell() []string {
return []string{"cmd", "/S", "/C"}
}


@ -0,0 +1,38 @@
package dockerfile2llb
import (
"bufio"
"io"
"regexp"
"strings"
)
const keySyntax = "syntax"
var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
func DetectSyntax(r io.Reader) (string, string, bool) {
directives := ParseDirectives(r)
if len(directives) == 0 {
return "", "", false
}
v, ok := directives[keySyntax]
if !ok {
return "", "", false
}
p := strings.SplitN(v, " ", 2)
return p[0], v, true
}
func ParseDirectives(r io.Reader) map[string]string {
m := map[string]string{}
s := bufio.NewScanner(r)
for s.Scan() {
match := reDirective.FindStringSubmatch(s.Text())
if len(match) == 0 {
return m
}
m[strings.ToLower(match[1])] = match[2]
}
return m
}
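A runnable sketch of the scanner above, not part of the vendored package; the sample Dockerfile content is assumed, and it shows that parsing stops at the first non-directive line.
package main
import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)
// Same pattern as reDirective above: "# key = value" comment lines.
var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
func main() {
	src := "# syntax = docker/dockerfile:1-experimental\n# escape = `\nFROM alpine\n# syntax = ignored-after-first-real-line\n"
	m := map[string]string{}
	s := bufio.NewScanner(strings.NewReader(src))
	for s.Scan() {
		match := reDirective.FindStringSubmatch(s.Text())
		if len(match) == 0 {
			break // the first non-directive line ends the header block
		}
		m[strings.ToLower(match[1])] = match[2]
	}
	fmt.Println(m) // map[escape:` syntax:docker/dockerfile:1-experimental]
}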


@ -0,0 +1,75 @@
package dockerfile2llb
import (
"runtime"
"time"
"github.com/docker/docker/api/types/strslice"
"github.com/moby/buildkit/util/system"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time for the container to initialize before retries start to count down.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
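// Illustrative encoding (not from the vendored source): a Dockerfile line
// such as HEALTHCHECK --interval=30s CMD curl -f http://localhost/ passes
// through this struct roughly as
// {"Test":["CMD-SHELL","curl -f http://localhost/"],"Interval":30000000000},
// the 30s interval marshalled as integer nanoseconds per the comment above.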
type ImageConfig struct {
ocispec.ImageConfig
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
// NetworkDisabled bool `json:",omitempty"` // Is network disabled
// MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispec.Image
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}
func clone(src Image) Image {
img := src
img.Config = src.Config
img.Config.Env = append([]string{}, src.Config.Env...)
img.Config.Cmd = append([]string{}, src.Config.Cmd...)
img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...)
return img
}
func emptyImage() Image {
img := Image{
Image: ocispec.Image{
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
},
}
img.RootFS.Type = "layers"
img.Config.WorkingDir = "/"
img.Config.Env = []string{"PATH=" + system.DefaultPathEnv}
return img
}


@ -0,0 +1,86 @@
package dockerfile
import (
"context"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
)
func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*bridgeClient, error) {
return &bridgeClient{opts: opts, FrontendLLBBridge: llbBridge, sid: session.FromContext(ctx)}, nil
}
type bridgeClient struct {
frontend.FrontendLLBBridge
opts map[string]string
final *ref
sid string
exporterAttr map[string][]byte
refs []*ref
}
func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest, exporterAttr map[string][]byte, final bool) (client.Reference, error) {
r, exporterAttrRes, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
Definition: req.Definition,
Frontend: req.Frontend,
FrontendOpt: req.FrontendOpt,
ImportCacheRefs: req.ImportCacheRefs,
})
if err != nil {
return nil, err
}
rr := &ref{r}
c.refs = append(c.refs, rr)
if final {
c.final = rr
if exporterAttr == nil {
exporterAttr = make(map[string][]byte)
}
for k, v := range exporterAttrRes {
exporterAttr[k] = v
}
c.exporterAttr = exporterAttr
}
return rr, nil
}
func (c *bridgeClient) Opts() map[string]string {
return c.opts
}
func (c *bridgeClient) SessionID() string {
return c.sid
}
type ref struct {
solver.CachedResult
}
func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
ref, err := r.getImmutableRef()
if err != nil {
return nil, err
}
newReq := cache.ReadRequest{
Filename: req.Filename,
}
if r := req.Range; r != nil {
newReq.Range = &cache.FileRange{
Offset: r.Offset,
Length: r.Length,
}
}
return cache.ReadFile(ctx, ref, newReq)
}
func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
}
return ref.ImmutableRef, nil
}


@ -11,6 +11,7 @@ type FlagType int
const (
boolType FlagType = iota
stringType
stringsType
)
// BFlags contains all flags information for the builder
@ -23,10 +24,11 @@ type BFlags struct {
// Flag contains all information for a flag
type Flag struct {
bf *BFlags
name string
flagType FlagType
Value string
bf *BFlags
name string
flagType FlagType
Value string
StringValues []string
}
// NewBFlags returns the new BFlags struct
@ -70,6 +72,15 @@ func (bf *BFlags) AddString(name string, def string) *Flag {
return flag
}
// AddStrings adds a strings-type flag to BFlags that may be specified multiple times; every value is collected
func (bf *BFlags) AddStrings(name string) *Flag {
flag := bf.addFlag(name, stringsType)
if flag == nil {
return nil
}
return flag
}
// addFlag is a generic func used by the other AddXXX() func
// to add a new flag to the BFlags struct.
// Note, any error will be generated when Parse() is called (see Parse).
@ -145,7 +156,7 @@ func (bf *BFlags) Parse() error {
return fmt.Errorf("Unknown flag: %s", arg)
}
if _, ok = bf.used[arg]; ok {
if _, ok = bf.used[arg]; ok && flag.flagType != stringsType {
return fmt.Errorf("Duplicate flag specified: %s", arg)
}
@ -173,6 +184,12 @@ func (bf *BFlags) Parse() error {
}
flag.Value = value
case stringsType:
if index < 0 {
return fmt.Errorf("Missing a value on flag: %s", arg)
}
flag.StringValues = append(flag.StringValues, value)
default:
panic("No idea what kind of flag we have! Should never get here!")
}


@ -233,6 +233,7 @@ type ShellDependantCmdLine struct {
//
type RunCommand struct {
withNameAndCode
withExternalData
ShellDependantCmdLine
}
@ -416,3 +417,18 @@ func HasStage(s []Stage, name string) (int, bool) {
}
return -1, false
}
type withExternalData struct {
m map[interface{}]interface{}
}
func (c *withExternalData) getExternalValue(k interface{}) interface{} {
return c.m[k]
}
func (c *withExternalData) setExternalValue(k, v interface{}) {
if c.m == nil {
c.m = map[interface{}]interface{}{}
}
c.m[k] = v
}


@ -0,0 +1,161 @@
// +build dfrunmount dfextall
package instructions
import (
"encoding/csv"
"strconv"
"strings"
"github.com/pkg/errors"
)
const MountTypeBind = "bind"
const MountTypeCache = "cache"
const MountTypeTmpfs = "tmpfs"
var allowedMountTypes = map[string]struct{}{
MountTypeBind: {},
MountTypeCache: {},
MountTypeTmpfs: {},
}
type mountsKeyT string
var mountsKey = mountsKeyT("dockerfile/run/mounts")
func init() {
parseRunPreHooks = append(parseRunPreHooks, runMountPreHook)
parseRunPostHooks = append(parseRunPostHooks, runMountPostHook)
}
func isValidMountType(s string) bool {
_, ok := allowedMountTypes[s]
return ok
}
func runMountPreHook(cmd *RunCommand, req parseRequest) error {
st := &mountState{}
st.flag = req.flags.AddStrings("mount")
cmd.setExternalValue(mountsKey, st)
return nil
}
func runMountPostHook(cmd *RunCommand, req parseRequest) error {
st := getMountState(cmd)
if st == nil {
return errors.Errorf("no mount state")
}
var mounts []*Mount
for _, str := range st.flag.StringValues {
m, err := parseMount(str)
if err != nil {
return err
}
mounts = append(mounts, m)
}
st.mounts = mounts
return nil
}
func getMountState(cmd *RunCommand) *mountState {
v := cmd.getExternalValue(mountsKey)
if v == nil {
return nil
}
return v.(*mountState)
}
func GetMounts(cmd *RunCommand) []*Mount {
return getMountState(cmd).mounts
}
type mountState struct {
flag *Flag
mounts []*Mount
}
type Mount struct {
Type string
From string
Source string
Target string
ReadOnly bool
CacheID string
}
func parseMount(value string) (*Mount, error) {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrap(err, "failed to parse csv mounts")
}
m := &Mount{Type: MountTypeBind}
roAuto := true
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) == 1 {
switch key {
case "readonly", "ro":
m.ReadOnly = true
roAuto = false
continue
case "readwrite", "rw":
m.ReadOnly = false
roAuto = false
continue
}
}
if len(parts) != 2 {
return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "type":
if !isValidMountType(strings.ToLower(value)) {
return nil, errors.Errorf("invalid mount type %q", value)
}
m.Type = strings.ToLower(value)
case "from":
m.From = value
case "source", "src":
m.Source = value
case "target", "dst", "destination":
m.Target = value
case "readonly", "ro":
m.ReadOnly, err = strconv.ParseBool(value)
if err != nil {
return nil, errors.Errorf("invalid value for %s: %s", key, value)
}
roAuto = false
case "readwrite", "rw":
rw, err := strconv.ParseBool(value)
if err != nil {
return nil, errors.Errorf("invalid value for %s: %s", key, value)
}
m.ReadOnly = !rw
roAuto = false
case "id":
m.CacheID = value
default:
return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
if roAuto {
if m.Type == MountTypeCache {
m.ReadOnly = false
} else {
m.ReadOnly = true
}
}
return m, nil
}
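To make the flag grammar concrete, a small standalone sketch (not part of the vendored package) of the one-record CSV split that parseMount performs, with the Mount it would produce noted in a comment:
package main
import (
	"encoding/csv"
	"fmt"
	"strings"
)
func main() {
	// Same one-record CSV split that parseMount applies to each
	// --mount flag value before interpreting key=value pairs.
	fields, err := csv.NewReader(strings.NewReader("type=cache,target=/root/.cache,ro")).Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(fields)
	// Output: [type=cache target=/root/.cache ro]
	// parseMount would return &Mount{Type: "cache", Target: "/root/.cache", ReadOnly: true}.
}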


@ -24,6 +24,9 @@ type parseRequest struct {
original string
}
var parseRunPreHooks []func(*RunCommand, parseRequest) error
var parseRunPostHooks []func(*RunCommand, parseRequest) error
func nodeArgs(node *parser.Node) []string {
result := []string{}
for ; node.Next != nil; node = node.Next {
@ -355,15 +358,28 @@ func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependan
}
func parseRun(req parseRequest) (*RunCommand, error) {
cmd := &RunCommand{}
for _, fn := range parseRunPreHooks {
if err := fn(cmd, req); err != nil {
return nil, err
}
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &RunCommand{
ShellDependantCmdLine: parseShellDependentCommand(req, false),
withNameAndCode: newWithNameAndCode(req),
}, nil
cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false)
cmd.withNameAndCode = newWithNameAndCode(req)
for _, fn := range parseRunPostHooks {
if err := fn(cmd, req); err != nil {
return nil, err
}
}
return cmd, nil
}
func parseCmd(req parseRequest) (*CmdCommand, error) {

vendor/github.com/moby/buildkit/frontend/frontend.go generated vendored Normal file

@ -0,0 +1,29 @@
package frontend
import (
"context"
"io"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
type Frontend interface {
Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (solver.CachedResult, map[string][]byte, error)
}
type FrontendLLBBridge interface {
Solve(ctx context.Context, req SolveRequest) (solver.CachedResult, map[string][]byte, error)
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
}
type SolveRequest struct {
Definition *pb.Definition
Frontend string
FrontendOpt map[string]string
ImportCacheRefs []string
}


@ -0,0 +1,40 @@
package client
import (
"context"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
// TODO: make this take same options as LLBBridge. Add Return()
type Client interface {
Solve(ctx context.Context, req SolveRequest, exporterAttr map[string][]byte, final bool) (Reference, error)
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
Opts() map[string]string
SessionID() string
}
type Reference interface {
ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
// StatFile(ctx context.Context, req StatRequest) (*StatResponse, error)
// ReadDir(ctx context.Context, req ReadDirRequest) ([]*StatResponse, error)
}
type ReadRequest struct {
Filename string
Range *FileRange
}
type FileRange struct {
Offset int
Length int
}
// SolveRequest is the same as frontend.SolveRequest but avoids the package dependency
type SolveRequest struct {
Definition *pb.Definition
Frontend string
FrontendOpt map[string]string
ImportCacheRefs []string
}


@ -0,0 +1,348 @@
package gateway
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/frontend"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/worker"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
)
const (
keySource = "source"
keyDevel = "gateway-devel"
exporterImageConfig = "containerimage.config"
)
func NewGatewayFrontend() frontend.Frontend {
return &gatewayFrontend{}
}
type gatewayFrontend struct {
}
func filterPrefix(opts map[string]string, pfx string) map[string]string {
m := map[string]string{}
for k, v := range opts {
if strings.HasPrefix(k, pfx) {
m[strings.TrimPrefix(k, pfx)] = v
}
}
return m
}
func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
source, ok := opts[keySource]
if !ok {
return nil, nil, errors.Errorf("no source specified for gateway")
}
sid := session.FromContext(ctx)
_, isDevel := opts[keyDevel]
var img ocispec.Image
var rootFS cache.ImmutableRef
var readonly bool // TODO: try to switch to read-only by default.
if isDevel {
ref, exp, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
frontend.SolveRequest{
Frontend: source,
FrontendOpt: filterPrefix(opts, "gateway-"),
})
if err != nil {
return nil, nil, err
}
defer ref.Release(context.TODO())
workerRef, ok := ref.Sys().(*worker.WorkerRef)
if !ok {
return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
}
rootFS = workerRef.ImmutableRef
config, ok := exp[exporterImageConfig]
if ok {
if err := json.Unmarshal(config, &img); err != nil {
return nil, nil, err
}
}
} else {
sourceRef, err := reference.ParseNormalizedNamed(source)
if err != nil {
return nil, nil, err
}
dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String())
if err != nil {
return nil, nil, err
}
if err := json.Unmarshal(config, &img); err != nil {
return nil, nil, err
}
sourceRef, err = reference.WithDigest(sourceRef, dgst)
if err != nil {
return nil, nil, err
}
src := llb.Image(sourceRef.String())
def, err := src.Marshal()
if err != nil {
return nil, nil, err
}
ref, _, err := llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, nil, err
}
defer ref.Release(context.TODO())
workerRef, ok := ref.Sys().(*worker.WorkerRef)
if !ok {
return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
}
rootFS = workerRef.ImmutableRef
}
lbf, err := newLLBBridgeForwarder(ctx, llbBridge)
if err != nil {
return nil, nil, err
}
defer lbf.conn.Close()
args := []string{"/run"}
env := []string{}
cwd := "/"
if img.Config.Env != nil {
env = img.Config.Env
}
if img.Config.Entrypoint != nil {
args = img.Config.Entrypoint
}
if img.Config.WorkingDir != "" {
cwd = img.Config.WorkingDir
}
i := 0
for k, v := range opts {
env = append(env, fmt.Sprintf("BUILDKIT_FRONTEND_OPT_%d", i)+"="+k+"="+v)
i++
}
env = append(env, "BUILDKIT_SESSION_ID="+sid)
defer func() {
for _, r := range lbf.refs {
if r != nil && (lbf.lastRef != r || retErr != nil) {
r.Release(context.TODO())
}
}
}()
err = llbBridge.Exec(ctx, executor.Meta{
Env: env,
Args: args,
Cwd: cwd,
ReadonlyRootFS: readonly,
}, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
if err != nil {
return nil, nil, err
}
return lbf.lastRef, lbf.exporterAttr, nil
}
func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge) (*llbBridgeForwarder, error) {
lbf := &llbBridgeForwarder{
callCtx: ctx,
llbBridge: llbBridge,
refs: map[string]solver.Result{},
pipe: newPipe(),
}
server := grpc.NewServer()
grpc_health_v1.RegisterHealthServer(server, health.NewServer())
pb.RegisterLLBBridgeServer(server, lbf)
go serve(ctx, server, lbf.conn)
return lbf, nil
}
type pipe struct {
Stdin io.ReadCloser
Stdout io.WriteCloser
conn net.Conn
}
func newPipe() *pipe {
pr1, pw1, _ := os.Pipe()
pr2, pw2, _ := os.Pipe()
return &pipe{
Stdin: pr1,
Stdout: pw2,
conn: &conn{
Reader: pr2,
Writer: pw1,
Closer: pw2,
},
}
}
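// Note on the wiring above (illustrative, not from the vendored source):
// the two os.Pipe pairs form one full-duplex stream. Bytes the wrapped conn
// writes enter pw1 and surface on Stdin (pr1), which is handed to the
// frontend process as its stdin; bytes the frontend writes to Stdout (pw2)
// surface on pr2, which the conn reads. The gRPC server can therefore speak
// HTTP/2 over what is effectively the frontend's stdio.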
type conn struct {
io.Reader
io.Writer
io.Closer
}
func (s *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "pipe"
}
func (d dummyAddr) String() string {
return "localhost"
}
type llbBridgeForwarder struct {
callCtx context.Context
llbBridge frontend.FrontendLLBBridge
refs map[string]solver.Result
lastRef solver.CachedResult
exporterAttr map[string][]byte
*pipe
}
func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref)
if err != nil {
return nil, err
}
return &pb.ResolveImageConfigResponse{
Digest: dgst,
Config: dt,
}, nil
}
func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
ref, expResp, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: req.Definition,
Frontend: req.Frontend,
FrontendOpt: req.FrontendOpt,
ImportCacheRefs: req.ImportCacheRefs,
})
if err != nil {
return nil, err
}
exp := map[string][]byte{}
if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
return nil, err
}
if expResp != nil {
for k, v := range expResp {
exp[k] = v
}
}
id := identity.NewID()
lbf.refs[id] = ref
if req.Final {
lbf.lastRef = ref
lbf.exporterAttr = exp
}
if ref == nil {
id = ""
}
return &pb.SolveResponse{Ref: id}, nil
}
func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
ref, ok := lbf.refs[req.Ref]
if !ok {
return nil, errors.Errorf("no such ref: %v", req.Ref)
}
if ref == nil {
return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath)
}
workerRef, ok := ref.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid ref: %T", ref.Sys())
}
newReq := cache.ReadRequest{
Filename: req.FilePath,
}
if r := req.Range; r != nil {
newReq.Range = &cache.FileRange{
Offset: int(r.Offset),
Length: int(r.Length),
}
}
dt, err := cache.ReadFile(ctx, workerRef.ImmutableRef, newReq)
if err != nil {
return nil, err
}
return &pb.ReadFileResponse{Data: dt}, nil
}
func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) {
return &pb.PongResponse{}, nil
}
func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
go func() {
<-ctx.Done()
conn.Close()
}()
logrus.Debugf("serving grpc connection")
(&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer})
}

File diff suppressed because it is too large


@ -0,0 +1,60 @@
syntax = "proto3";
package moby.buildkit.v1.frontend;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service LLBBridge {
rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
rpc Solve(SolveRequest) returns (SolveResponse);
rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
rpc Ping(PingRequest) returns (PongResponse);
}
message ResolveImageConfigRequest {
string Ref = 1;
}
message ResolveImageConfigResponse {
string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
bytes Config = 2;
}
message SolveRequest {
pb.Definition Definition = 1;
string Frontend = 2;
map<string, string> FrontendOpt = 3;
repeated string ImportCacheRefs = 4;
bool Final = 10;
bytes ExporterAttr = 11;
}
message SolveResponse {
string Ref = 1; // can be used by readfile request
bytes ExporterAttr = 2;
}
message ReadFileRequest {
string Ref = 1;
string FilePath = 2;
FileRange Range = 3;
}
message FileRange {
int64 Offset = 1;
int64 Length = 2;
}
message ReadFileResponse {
bytes Data = 1;
}
message PingRequest{
}
message PongResponse{
}


@ -0,0 +1,3 @@
package moby_buildkit_v1_frontend
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto

vendor/github.com/moby/buildkit/session/auth/auth.go generated vendored Normal file

@ -0,0 +1,26 @@
package auth
import (
"context"
"github.com/moby/buildkit/session"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) {
return func(host string) (string, string, error) {
client := NewAuthClient(c.Conn())
resp, err := client.Credentials(ctx, &CredentialsRequest{
Host: host,
})
if err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
return "", "", nil
}
return "", "", err
}
return resp.Username, resp.Secret, nil
}
}

vendor/github.com/moby/buildkit/session/auth/auth.pb.go generated vendored Normal file

@ -0,0 +1,673 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto
/*
Package auth is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
CredentialsRequest
CredentialsResponse
*/
package auth
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import strings "strings"
import reflect "reflect"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CredentialsRequest struct {
Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"`
}
func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} }
func (*CredentialsRequest) ProtoMessage() {}
func (*CredentialsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
func (m *CredentialsRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
type CredentialsResponse struct {
Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"`
}
func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} }
func (*CredentialsResponse) ProtoMessage() {}
func (*CredentialsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
func (m *CredentialsResponse) GetUsername() string {
if m != nil {
return m.Username
}
return ""
}
func (m *CredentialsResponse) GetSecret() string {
if m != nil {
return m.Secret
}
return ""
}
func init() {
proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest")
proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse")
}
func (this *CredentialsRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CredentialsRequest)
if !ok {
that2, ok := that.(CredentialsRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Host != that1.Host {
return false
}
return true
}
func (this *CredentialsResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CredentialsResponse)
if !ok {
that2, ok := that.(CredentialsResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Username != that1.Username {
return false
}
if this.Secret != that1.Secret {
return false
}
return true
}
func (this *CredentialsRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&auth.CredentialsRequest{")
s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *CredentialsResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&auth.CredentialsResponse{")
s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n")
s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringAuth(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Auth service
type AuthClient interface {
Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error)
}
type authClient struct {
cc *grpc.ClientConn
}
func NewAuthClient(cc *grpc.ClientConn) AuthClient {
return &authClient{cc}
}
func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) {
out := new(CredentialsResponse)
err := grpc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Auth service
type AuthServer interface {
Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error)
}
func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
s.RegisterService(&_Auth_serviceDesc, srv)
}
func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CredentialsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AuthServer).Credentials(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/moby.filesync.v1.Auth/Credentials",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Auth_serviceDesc = grpc.ServiceDesc{
ServiceName: "moby.filesync.v1.Auth",
HandlerType: (*AuthServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Credentials",
Handler: _Auth_Credentials_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "auth.proto",
}
func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Host) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Host)))
i += copy(dAtA[i:], m.Host)
}
return i, nil
}
func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Username) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Username)))
i += copy(dAtA[i:], m.Username)
}
if len(m.Secret) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret)))
i += copy(dAtA[i:], m.Secret)
}
return i, nil
}
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *CredentialsRequest) Size() (n int) {
var l int
_ = l
l = len(m.Host)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func (m *CredentialsResponse) Size() (n int) {
var l int
_ = l
l = len(m.Username)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.Secret)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func sovAuth(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAuth(x uint64) (n int) {
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *CredentialsRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CredentialsRequest{`,
`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
`}`,
}, "")
return s
}
func (this *CredentialsResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CredentialsResponse{`,
`Username:` + fmt.Sprintf("%v", this.Username) + `,`,
`Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
`}`,
}, "")
return s
}
func valueToStringAuth(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *CredentialsRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Host = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CredentialsResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Username = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Secret = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipAuth(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAuth
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAuth(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
var fileDescriptorAuth = []byte{
// 224 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9,
0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc,
0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d,
0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12,
0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95,
0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84,
0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1,
0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1,
0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61,
0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0x8c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1,
0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e,
0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31,
0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x0b, 0x28, 0x90, 0x92, 0xd8, 0xc0,
0xa1, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x73, 0xf3, 0xd5, 0x33, 0x01, 0x00, 0x00,
}


@@ -0,0 +1,19 @@
syntax = "proto3";
package moby.filesync.v1;
option go_package = "auth";
service Auth{
rpc Credentials(CredentialsRequest) returns (CredentialsResponse);
}
message CredentialsRequest {
string Host = 1;
}
message CredentialsResponse {
string Username = 1;
string Secret = 2;
}


@@ -0,0 +1,3 @@
package auth
//go:generate protoc --gogoslick_out=plugins=grpc:. auth.proto


@@ -12,10 +12,11 @@ import (
"google.golang.org/grpc"
)
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
ExcludePatterns: excludes,
IncludePatterns: includes,
FollowPaths: followPaths,
Map: _map,
}, progress)
}
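For reference, a call under the updated signature might look like the sketch below (paths are illustrative):

func exampleSend(stream grpc.Stream) error {
	return sendDiffCopy(stream, "/src",
		nil,                      // includes
		[]string{"node_modules"}, // excludes
		[]string{"Dockerfile"},   // followPaths, forwarded to fsutil.WalkOpt
		nil,                      // progress
		nil)                      // _map
}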


@@ -18,6 +18,7 @@ const (
keyOverrideExcludes = "override-excludes"
keyIncludePatterns = "include-patterns"
keyExcludePatterns = "exclude-patterns"
keyFollowPaths = "followpaths"
keyDirName = "dir-name"
)
@@ -87,6 +88,8 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
}
includes := opts[keyIncludePatterns]
followPaths := opts[keyFollowPaths]
var progress progressCb
if sp.p != nil {
progress = sp.p
@@ -98,7 +101,7 @@
doneCh = sp.doneCh
sp.doneCh = nil
}
err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map)
err := pr.sendFn(stream, dir.Dir, includes, excludes, followPaths, progress, dir.Map)
if doneCh != nil {
if err != nil {
doneCh <- err
@@ -117,7 +120,7 @@ type progressCb func(int, bool)
type protocol struct {
name string
sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error
sendFn func(stream grpc.Stream, srcDir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
}
@@ -142,6 +145,7 @@ type FSSendRequestOpt struct {
Name string
IncludePatterns []string
ExcludePatterns []string
FollowPaths []string
OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory
DestDir string
CacheUpdater CacheUpdater
@@ -181,6 +185,10 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
opts[keyExcludePatterns] = opt.ExcludePatterns
}
if opt.FollowPaths != nil {
opts[keyFollowPaths] = opt.FollowPaths
}
opts[keyDirName] = []string{opt.Name}
ctx, cancel := context.WithCancel(ctx)
@@ -261,7 +269,7 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres
return err
}
return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
return sendDiffCopy(cc, srcPath, nil, nil, nil, progress, nil)
}
func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {


@@ -0,0 +1,156 @@
package grpchijack
import (
"context"
"io"
"net"
"strings"
"sync"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/session"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
func Dialer(api controlapi.ControlClient) session.Dialer {
return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
meta = lowerHeaders(meta)
md := metadata.MD(meta)
ctx = metadata.NewOutgoingContext(ctx, md)
stream, err := api.Session(ctx)
if err != nil {
return nil, err
}
c, _ := streamToConn(stream)
return c, nil
}
}
func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) {
closeCh := make(chan struct{})
c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh}
return c, closeCh
}
type conn struct {
stream grpc.Stream
buf []byte
lastBuf []byte
closedOnce sync.Once
readMu sync.Mutex
err error
closeCh chan struct{}
}
func (c *conn) Read(b []byte) (n int, err error) {
c.readMu.Lock()
defer c.readMu.Unlock()
if c.lastBuf != nil {
n := copy(b, c.lastBuf)
c.lastBuf = c.lastBuf[n:]
if len(c.lastBuf) == 0 {
c.lastBuf = nil
}
return n, nil
}
m := new(controlapi.BytesMessage)
m.Data = c.buf
if err := c.stream.RecvMsg(m); err != nil {
return 0, err
}
c.buf = m.Data[:cap(m.Data)]
n = copy(b, m.Data)
if n < len(m.Data) {
c.lastBuf = m.Data[n:]
}
return n, nil
}
func (c *conn) Write(b []byte) (int, error) {
m := &controlapi.BytesMessage{Data: b}
if err := c.stream.SendMsg(m); err != nil {
return 0, err
}
return len(b), nil
}
func (c *conn) Close() (err error) {
c.closedOnce.Do(func() {
defer func() {
close(c.closeCh)
}()
if cs, ok := c.stream.(grpc.ClientStream); ok {
err = cs.CloseSend()
if err != nil {
return
}
}
c.readMu.Lock()
for {
m := new(controlapi.BytesMessage)
m.Data = c.buf
err = c.stream.RecvMsg(m)
if err != nil {
if err != io.EOF {
return
}
err = nil
break
}
c.buf = m.Data[:cap(m.Data)]
c.lastBuf = append(c.lastBuf, c.buf...)
}
c.readMu.Unlock()
})
return nil
}
func (c *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (c *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (c *conn) SetDeadline(t time.Time) error {
return nil
}
func (c *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "tcp"
}
func (d dummyAddr) String() string {
return "localhost"
}
func lowerHeaders(in map[string][]string) map[string][]string {
out := map[string][]string{}
for k := range in {
out[strings.ToLower(k)] = in[k]
}
return out
}
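Client-side usage is a one-liner once a ControlClient exists; a sketch (the metadata key here is illustrative, not a protocol constant):

func exampleDial(ctx context.Context, api controlapi.ControlClient) (net.Conn, error) {
	dial := Dialer(api)
	// metadata is lowercased and attached to the outgoing Session call
	return dial(ctx, "h2c", map[string][]string{"x-session-id": {"example"}})
}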


@@ -0,0 +1,14 @@
package grpchijack
import (
"net"
controlapi "github.com/moby/buildkit/api/services/control"
"google.golang.org/grpc/metadata"
)
func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) {
md, _ := metadata.FromIncomingContext(stream.Context())
c, closeCh := streamToConn(stream)
return c, closeCh, md
}
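The server half mirrors the dialer. A hypothetical sketch inside a Session handler (the surrounding handler is assumed, not shown in this diff):

func exampleSession(stream controlapi.Control_SessionServer) error {
	conn, closeCh, md := Hijack(stream)
	defer conn.Close()
	_ = md // lowercased request metadata from the incoming context
	// hand conn to a session handler here; closeCh is closed once conn is closed
	_ = closeCh
	return nil
}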


@@ -0,0 +1,129 @@
package blobmapping
import (
"context"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/snapshots"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const blobKey = "blobmapping.blob"
type Opt struct {
Content content.Store
Snapshotter snapshot.SnapshotterBase
MetadataStore *metadata.Store
}
type Info struct {
snapshots.Info
Blob string
}
type DiffPair struct {
Blobsum digest.Digest
DiffID digest.Digest
}
// this snapshotter keeps an internal mapping between a snapshot and a blob
type Snapshotter struct {
snapshot.SnapshotterBase
opt Opt
}
func NewSnapshotter(opt Opt) snapshot.Snapshotter {
s := &Snapshotter{
SnapshotterBase: opt.Snapshotter,
opt: opt,
}
return s
}
// Remove also removes a reference to a blob. If it is the last reference then it deletes the blob as well
// Remove is not safe to be called concurrently
func (s *Snapshotter) Remove(ctx context.Context, key string) error {
_, blob, err := s.GetBlob(ctx, key)
if err != nil {
return err
}
blobs, err := s.opt.MetadataStore.Search(index(blob))
if err != nil {
return err
}
if err := s.SnapshotterBase.Remove(ctx, key); err != nil {
return err
}
if len(blobs) == 1 && blobs[0].ID() == key { // last snapshot
if err := s.opt.Content.Delete(ctx, blob); err != nil {
logrus.Errorf("failed to delete blob %v: %+v", blob, err)
}
}
return nil
}
func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
u, err := s.SnapshotterBase.Usage(ctx, key)
if err != nil {
return snapshots.Usage{}, err
}
_, blob, err := s.GetBlob(ctx, key)
if err != nil {
return u, err
}
if blob != "" {
info, err := s.opt.Content.Info(ctx, blob)
if err != nil {
return u, err
}
(&u).Add(snapshots.Usage{Size: info.Size, Inodes: 1})
}
return u, nil
}
func (s *Snapshotter) GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) {
md, _ := s.opt.MetadataStore.Get(key)
v := md.Get(blobKey)
if v == nil {
return "", "", nil
}
var blob DiffPair
if err := v.Unmarshal(&blob); err != nil {
return "", "", err
}
return blob.DiffID, blob.Blobsum, nil
}
// SetBlob associates a blob with the snapshot key.
// It checks that the blob exists in the content store.
// If the same blob has already been set then this is a noop.
func (s *Snapshotter) SetBlob(ctx context.Context, key string, diffID, blobsum digest.Digest) error {
_, err := s.opt.Content.Info(ctx, blobsum)
if err != nil {
return err
}
md, _ := s.opt.MetadataStore.Get(key)
v, err := metadata.NewValue(DiffPair{DiffID: diffID, Blobsum: blobsum})
if err != nil {
return err
}
v.Index = index(blobsum)
return md.Update(func(b *bolt.Bucket) error {
return md.SetValue(b, blobKey, v)
})
}
func index(blob digest.Digest) string {
return "blobmap::" + blob.String()
}
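Construction is a matter of wiring the three stores together; a minimal sketch (the function name is illustrative):

func exampleNew(cs content.Store, base snapshot.SnapshotterBase, md *metadata.Store) snapshot.Snapshotter {
	return NewSnapshotter(Opt{
		Content:       cs,   // blob storage
		Snapshotter:   base, // underlying snapshotter to wrap
		MetadataStore: md,   // persists the snapshot-to-blob mapping
	})
}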


@@ -0,0 +1,72 @@
package snapshot
import (
"io/ioutil"
"os"
"sync"
"github.com/containerd/containerd/mount"
"github.com/pkg/errors"
)
type Mounter interface {
Mount() (string, error)
Unmount() error
}
// LocalMounter is a helper for mounting a Mountable to a temporary path. In
// addition it can mount binds without privileges
func LocalMounter(mountable Mountable) Mounter {
return &localMounter{mountable: mountable}
}
// LocalMounterWithMounts is a helper for mounting raw mounts to a temporary
// path. In addition it can mount binds without privileges
func LocalMounterWithMounts(mounts []mount.Mount) Mounter {
return &localMounter{mounts: mounts}
}
type localMounter struct {
mu sync.Mutex
mounts []mount.Mount
mountable Mountable
target string
}
func (lm *localMounter) Mount() (string, error) {
lm.mu.Lock()
defer lm.mu.Unlock()
if lm.mounts == nil {
mounts, err := lm.mountable.Mount()
if err != nil {
return "", err
}
lm.mounts = mounts
}
if len(lm.mounts) == 1 && (lm.mounts[0].Type == "bind" || lm.mounts[0].Type == "rbind") {
ro := false
for _, opt := range lm.mounts[0].Options {
if opt == "ro" {
ro = true
break
}
}
if !ro {
return lm.mounts[0].Source, nil
}
}
dir, err := ioutil.TempDir("", "buildkit-mount")
if err != nil {
return "", errors.Wrap(err, "failed to create temp dir")
}
if err := mount.All(lm.mounts, dir); err != nil {
os.RemoveAll(dir)
return "", errors.Wrapf(err, "failed to mount %s: %+v", dir, lm.mounts)
}
lm.target = dir
return dir, nil
}
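A usage sketch for LocalMounter (illustrative; Unmount is defined per platform in the files that follow):

func exampleMount(m Mountable) error {
	lm := LocalMounter(m)
	dir, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount() // see the platform-specific implementations below
	_ = dir            // read or write under dir while mounted
	return nil
}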


@@ -0,0 +1,29 @@
// +build !windows
package snapshot
import (
"os"
"syscall"
"github.com/containerd/containerd/mount"
)
func (lm *localMounter) Unmount() error {
lm.mu.Lock()
defer lm.mu.Unlock()
if lm.target != "" {
if err := mount.Unmount(lm.target, syscall.MNT_DETACH); err != nil {
return err
}
os.RemoveAll(lm.target)
lm.target = ""
}
if lm.mountable != nil {
return lm.mountable.Release()
}
return nil
}


@@ -0,0 +1,26 @@
package snapshot
import (
"os"
"github.com/containerd/containerd/mount"
)
func (lm *localMounter) Unmount() error {
lm.mu.Lock()
defer lm.mu.Unlock()
if lm.target != "" {
if err := mount.Unmount(lm.target, 0); err != nil {
return err
}
os.RemoveAll(lm.target)
lm.target = ""
}
if lm.mountable != nil {
return lm.mountable.Release()
}
return nil
}

vendor/github.com/moby/buildkit/snapshot/snapshotter.go generated vendored Normal file

@@ -0,0 +1,137 @@
package snapshot
import (
"context"
"sync"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshots"
digest "github.com/opencontainers/go-digest"
)
type Mountable interface {
// ID() string
Mount() ([]mount.Mount, error)
Release() error
}
type SnapshotterBase interface {
Mounts(ctx context.Context, key string) (Mountable, error)
Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error
View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error)
Stat(ctx context.Context, key string) (snapshots.Info, error)
Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error)
Usage(ctx context.Context, key string) (snapshots.Usage, error)
Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error
Remove(ctx context.Context, key string) error
Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error
Close() error
}
// Snapshotter defines interface that any snapshot implementation should satisfy
type Snapshotter interface {
Blobmapper
SnapshotterBase
}
type Blobmapper interface {
GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error)
SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error
}
func FromContainerdSnapshotter(s snapshots.Snapshotter) SnapshotterBase {
return &fromContainerd{Snapshotter: s}
}
type fromContainerd struct {
snapshots.Snapshotter
}
func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) {
mounts, err := s.Snapshotter.Mounts(ctx, key)
if err != nil {
return nil, err
}
return &staticMountable{mounts}, nil
}
func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
_, err := s.Snapshotter.Prepare(ctx, key, parent, opts...)
return err
}
func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) {
mounts, err := s.Snapshotter.View(ctx, key, parent, opts...)
if err != nil {
return nil, err
}
return &staticMountable{mounts}, nil
}
type staticMountable struct {
mounts []mount.Mount
}
func (m *staticMountable) Mount() ([]mount.Mount, error) {
return m.mounts, nil
}
func (cm *staticMountable) Release() error {
return nil
}
// NewContainerdSnapshotter converts snapshotter to containerd snapshotter
func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) {
cs := &containerdSnapshotter{Snapshotter: s}
return cs, cs.release
}
type containerdSnapshotter struct {
mu sync.Mutex
releasers []func() error
Snapshotter
}
func (cs *containerdSnapshotter) release() error {
cs.mu.Lock()
defer cs.mu.Unlock()
var err error
for _, f := range cs.releasers {
if err1 := f(); err1 != nil && err == nil {
err = err1
}
}
return err
}
func (cs *containerdSnapshotter) returnMounts(mf Mountable) ([]mount.Mount, error) {
mounts, err := mf.Mount()
if err != nil {
return nil, err
}
cs.mu.Lock()
cs.releasers = append(cs.releasers, mf.Release)
cs.mu.Unlock()
return mounts, nil
}
func (cs *containerdSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
mf, err := cs.Snapshotter.Mounts(ctx, key)
if err != nil {
return nil, err
}
return cs.returnMounts(mf)
}
func (cs *containerdSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
if err := cs.Snapshotter.Prepare(ctx, key, parent, opts...); err != nil {
return nil, err
}
return cs.Mounts(ctx, key)
}
func (cs *containerdSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
mf, err := cs.Snapshotter.View(ctx, key, parent, opts...)
if err != nil {
return nil, err
}
return cs.returnMounts(mf)
}
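The two adapters convert in opposite directions; a sketch (the function name is illustrative):

func exampleAdapt(base snapshots.Snapshotter, full Snapshotter) {
	sb := FromContainerdSnapshotter(base) // containerd -> buildkit direction
	_ = sb
	cs, release := NewContainerdSnapshotter(full) // buildkit -> containerd direction
	defer release()                               // frees Mountables handed out via cs
	_ = cs
}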


@@ -0,0 +1,449 @@
package boltdbcachestorage
import (
"bytes"
"encoding/json"
"fmt"
"github.com/boltdb/bolt"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
const (
resultBucket = "_result"
linksBucket = "_links"
byResultBucket = "_byresult"
backlinksBucket = "_backlinks"
)
type Store struct {
db *bolt.DB
}
func NewStore(dbPath string) (*Store, error) {
db, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
}
if err := db.Update(func(tx *bolt.Tx) error {
for _, b := range []string{resultBucket, linksBucket, byResultBucket, backlinksBucket} {
if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil {
return err
}
}
return nil
}); err != nil {
return nil, err
}
db.NoSync = true
return &Store{db: db}, nil
}
func (s *Store) Exists(id string) bool {
exists := false
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(linksBucket)).Bucket([]byte(id))
exists = b != nil
return nil
})
if err != nil {
return false
}
return exists
}
func (s *Store) Walk(fn func(id string) error) error {
ids := make([]string, 0)
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(linksBucket))
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
ids = append(ids, string(k))
}
}
return nil
}); err != nil {
return err
}
for _, id := range ids {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (s *Store) WalkResults(id string, fn func(solver.CacheResult) error) error {
var list []solver.CacheResult
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(resultBucket))
if b == nil {
return nil
}
b = b.Bucket([]byte(id))
if b == nil {
return nil
}
return b.ForEach(func(k, v []byte) error {
var res solver.CacheResult
if err := json.Unmarshal(v, &res); err != nil {
return err
}
list = append(list, res)
return nil
})
}); err != nil {
return err
}
for _, res := range list {
if err := fn(res); err != nil {
return err
}
}
return nil
}
func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) {
var res solver.CacheResult
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(resultBucket))
if b == nil {
return errors.WithStack(solver.ErrNotFound)
}
b = b.Bucket([]byte(id))
if b == nil {
return errors.WithStack(solver.ErrNotFound)
}
v := b.Get([]byte(resultID))
if v == nil {
return errors.WithStack(solver.ErrNotFound)
}
return json.Unmarshal(v, &res)
}); err != nil {
return solver.CacheResult{}, err
}
return res, nil
}
func (s *Store) AddResult(id string, res solver.CacheResult) error {
return s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
}
dt, err := json.Marshal(res)
if err != nil {
return err
}
if err := b.Put([]byte(res.ID), dt); err != nil {
return err
}
b, err = tx.Bucket([]byte(byResultBucket)).CreateBucketIfNotExists([]byte(res.ID))
if err != nil {
return err
}
if err := b.Put([]byte(id), []byte{}); err != nil {
return err
}
return nil
})
}
func (s *Store) WalkIDsByResult(resultID string, fn func(string) error) error {
ids := map[string]struct{}{}
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(byResultBucket))
if b == nil {
return nil
}
b = b.Bucket([]byte(resultID))
if b == nil {
return nil
}
return b.ForEach(func(k, v []byte) error {
ids[string(k)] = struct{}{}
return nil
})
}); err != nil {
return err
}
for id := range ids {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (s *Store) Release(resultID string) error {
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(byResultBucket))
if b == nil {
return errors.WithStack(solver.ErrNotFound)
}
b = b.Bucket([]byte(resultID))
if b == nil {
return errors.WithStack(solver.ErrNotFound)
}
if err := b.ForEach(func(k, v []byte) error {
return s.releaseHelper(tx, string(k), resultID)
}); err != nil {
return err
}
return nil
})
}
func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error {
results := tx.Bucket([]byte(resultBucket)).Bucket([]byte(id))
if results == nil {
return nil
}
if err := results.Delete([]byte(resultID)); err != nil {
return err
}
ids := tx.Bucket([]byte(byResultBucket))
ids = ids.Bucket([]byte(resultID))
if ids == nil {
return nil
}
if err := ids.Delete([]byte(id)); err != nil {
return err
}
if isEmptyBucket(ids) {
if err := tx.Bucket([]byte(byResultBucket)).DeleteBucket([]byte(resultID)); err != nil {
return err
}
}
return s.emptyBranchWithParents(tx, []byte(id))
}
func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error {
results := tx.Bucket([]byte(resultBucket)).Bucket(id)
if results == nil {
return nil
}
isEmptyLinks := true
links := tx.Bucket([]byte(linksBucket)).Bucket(id)
if links != nil {
isEmptyLinks = isEmptyBucket(links)
}
if !isEmptyBucket(results) || !isEmptyLinks {
return nil
}
if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil {
if err := backlinks.ForEach(func(k, v []byte) error {
if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil {
if err := subLinks.ForEach(func(k, v []byte) error {
parts := bytes.Split(k, []byte("@"))
if len(parts) != 2 {
return errors.Errorf("invalid key %s", k)
}
if bytes.Equal(id, parts[1]) {
return subLinks.Delete(k)
}
return nil
}); err != nil {
return err
}
if isEmptyBucket(subLinks) {
if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil {
return err
}
}
}
return s.emptyBranchWithParents(tx, k)
}); err != nil {
return err
}
if err := tx.Bucket([]byte(backlinksBucket)).DeleteBucket(id); err != nil {
return err
}
}
return nil
}
func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) error {
return s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id))
if err != nil {
return err
}
dt, err := json.Marshal(link)
if err != nil {
return err
}
if err := b.Put(bytes.Join([][]byte{dt, []byte(target)}, []byte("@")), []byte{}); err != nil {
return err
}
b, err = tx.Bucket([]byte(backlinksBucket)).CreateBucketIfNotExists([]byte(target))
if err != nil {
return err
}
if err := b.Put([]byte(id), []byte{}); err != nil {
return err
}
return nil
})
}
func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
var links []string
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(linksBucket))
if b == nil {
return nil
}
b = b.Bucket([]byte(id))
if b == nil {
return nil
}
dt, err := json.Marshal(link)
if err != nil {
return err
}
index := bytes.Join([][]byte{dt, {}}, []byte("@"))
c := b.Cursor()
k, _ := c.Seek([]byte(index))
for {
if k != nil && bytes.HasPrefix(k, index) {
target := bytes.TrimPrefix(k, index)
links = append(links, string(target))
k, _ = c.Next()
} else {
break
}
}
return nil
}); err != nil {
return err
}
for _, l := range links {
if err := fn(l); err != nil {
return err
}
}
return nil
}
func (s *Store) HasLink(id string, link solver.CacheInfoLink, target string) bool {
var v bool
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(linksBucket))
if b == nil {
return nil
}
b = b.Bucket([]byte(id))
if b == nil {
return nil
}
dt, err := json.Marshal(link)
if err != nil {
return err
}
v = b.Get(bytes.Join([][]byte{dt, []byte(target)}, []byte("@"))) != nil
return nil
}); err != nil {
return false
}
return v
}
func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
var outIDs []string
var outLinks []solver.CacheInfoLink
if err := s.db.View(func(tx *bolt.Tx) error {
links := tx.Bucket([]byte(linksBucket))
if links == nil {
return nil
}
backLinks := tx.Bucket([]byte(backlinksBucket))
if backLinks == nil {
return nil
}
b := backLinks.Bucket([]byte(id))
if b == nil {
return nil
}
if err := b.ForEach(func(bid, v []byte) error {
b = links.Bucket(bid)
if b == nil {
return nil
}
if err := b.ForEach(func(k, v []byte) error {
parts := bytes.Split(k, []byte("@"))
if len(parts) == 2 {
if string(parts[1]) != id {
return nil
}
var l solver.CacheInfoLink
if err := json.Unmarshal(parts[0], &l); err != nil {
return err
}
l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output)))
l.Output = 0
outIDs = append(outIDs, string(bid))
outLinks = append(outLinks, l)
}
return nil
}); err != nil {
return err
}
return nil
}); err != nil {
return err
}
return nil
}); err != nil {
return err
}
for i := range outIDs {
if err := fn(outIDs[i], outLinks[i]); err != nil {
return err
}
}
return nil
}
func isEmptyBucket(b *bolt.Bucket) bool {
if b == nil {
return true
}
k, _ := b.Cursor().First()
return k == nil
}
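A sketch of the store's life cycle (the path and IDs are illustrative):

func exampleStore() error {
	store, err := NewStore("/var/lib/buildkit/cache.db")
	if err != nil {
		return err
	}
	if err := store.AddResult("cachekey-id", solver.CacheResult{ID: "result-id"}); err != nil {
		return err
	}
	return store.WalkResults("cachekey-id", func(r solver.CacheResult) error {
		return nil // inspect r here
	})
}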

vendor/github.com/moby/buildkit/solver/cachekey.go generated vendored Normal file

@@ -0,0 +1,66 @@
package solver
import (
"sync"
digest "github.com/opencontainers/go-digest"
)
// NewCacheKey creates a new cache key for a specific output index
func NewCacheKey(dgst digest.Digest, output Index) *CacheKey {
return &CacheKey{
ID: rootKey(dgst, output).String(),
digest: dgst,
output: output,
ids: map[*cacheManager]string{},
}
}
// CacheKeyWithSelector combines a cache key with an optional selector digest.
// Used to limit the matches for dependency cache key.
type CacheKeyWithSelector struct {
Selector digest.Digest
CacheKey ExportableCacheKey
}
type CacheKey struct {
mu sync.RWMutex
ID string
deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey
digest digest.Digest
output Index
ids map[*cacheManager]string
indexIDs []string
}
func (ck *CacheKey) Deps() [][]CacheKeyWithSelector {
ck.mu.RLock()
defer ck.mu.RUnlock()
deps := make([][]CacheKeyWithSelector, len(ck.deps))
for i := range ck.deps {
deps[i] = append([]CacheKeyWithSelector(nil), ck.deps[i]...)
}
return deps
}
func (ck *CacheKey) Digest() digest.Digest {
return ck.digest
}
func (ck *CacheKey) Output() Index {
return ck.output
}
func (ck *CacheKey) clone() *CacheKey {
nk := &CacheKey{
ID: ck.ID,
digest: ck.digest,
output: ck.output,
ids: map[*cacheManager]string{},
}
for cm, id := range ck.ids {
nk.ids[cm] = id
}
return nk
}
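A sketch of constructing a key (the digest value is illustrative):

func exampleKey() *CacheKey {
	ck := NewCacheKey(digest.FromString("example-op"), Index(0))
	_ = ck.Digest() // the digest passed in above
	_ = ck.Output() // Index(0)
	return ck
}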

Some files were not shown because too many files have changed in this diff