Merge pull request #45367 from tonistiigi/vendor-buildkit-v0.11.6

vendor: update buildkit to v0.11.6
Akihiro Suda 2023-04-24 18:20:51 +09:00 committed by GitHub
commit 9ff00e35f8
10 changed files with 90 additions and 103 deletions

builder/builder-next/worker/worker.go

@@ -50,7 +50,7 @@ import (
 )
 func init() {
-	version.Version = "v0.11.5"
+	version.Version = "v0.11.6"
 }
 const labelCreatedAt = "buildkit/createdat"

go.mod

@@ -56,7 +56,7 @@ require (
 	github.com/klauspost/compress v1.16.3
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
-	github.com/moby/buildkit v0.11.5 // IMPORTANT: when updating, also update the version in builder/builder-next/worker/worker.go
+	github.com/moby/buildkit v0.11.6 // IMPORTANT: when updating, also update the version in builder/builder-next/worker/worker.go
 	github.com/moby/ipvs v1.1.0
 	github.com/moby/locker v1.0.1
 	github.com/moby/patternmatcher v0.5.0

go.sum

@@ -1040,8 +1040,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
-github.com/moby/buildkit v0.11.5 h1:S6YrFJ0bfBT2w9e8kOxqsDV8Bw+HtfqdB6eHL17BXRI=
-github.com/moby/buildkit v0.11.5/go.mod h1:P5Qi041LvCfhkfYBHry+Rwoo3Wi6H971J2ggE+PcIoo=
+github.com/moby/buildkit v0.11.6 h1:VYNdoKk5TVxN7k4RvZgdeM4GOyRvIi4Z8MXOY7xvyUs=
+github.com/moby/buildkit v0.11.6/go.mod h1:GCqKfHhz+pddzfgaR7WmHVEE3nKKZMMDPpK8mh3ZLv4=
 github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
 github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=

vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go

@@ -92,7 +92,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 	root := opt.Root
-	if err := os.MkdirAll(root, 0o711); err != nil {
+	if err := os.MkdirAll(root, 0711); err != nil {
 		return nil, errors.Wrapf(err, "failed to create %s", root)
 	}
@@ -205,7 +205,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	}
 	bundle := filepath.Join(w.root, id)
-	if err := os.Mkdir(bundle, 0o711); err != nil {
+	if err := os.Mkdir(bundle, 0711); err != nil {
 		return err
 	}
 	defer os.RemoveAll(bundle)
@@ -216,7 +216,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	}
 	rootFSPath := filepath.Join(bundle, "rootfs")
-	if err := idtools.MkdirAllAndChown(rootFSPath, 0o700, identity); err != nil {
+	if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil {
 		return err
 	}
 	if err := mount.All(rootMount, rootFSPath); err != nil {
@@ -270,7 +270,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
 	}
 	if _, err := os.Stat(newp); err != nil {
-		if err := idtools.MkdirAllAndChown(newp, 0o755, identity); err != nil {
+		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
 			return errors.Wrapf(err, "failed to create working directory %s", newp)
 		}
 	}
@@ -287,10 +287,42 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 		return err
 	}
+	// runCtx/killCtx is used for extra check in case the kill command blocks
+	runCtx, cancelRun := context.WithCancel(context.Background())
+	defer cancelRun()
+	ended := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
+				if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil {
+					bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err)
+					select {
+					case <-killCtx.Done():
+						timeout()
+						cancelRun()
+						return
+					default:
+					}
+				}
+				timeout()
+				select {
+				case <-time.After(50 * time.Millisecond):
+				case <-ended:
+					return
+				}
+			case <-ended:
+				return
+			}
+		}
+	}()
 	bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args)
 	trace.SpanFromContext(ctx).AddEvent("Container created")
-	err = w.run(ctx, id, bundle, process, func() {
+	err = w.run(runCtx, id, bundle, process, func() {
 		startedOnce.Do(func() {
 			trace.SpanFromContext(ctx).AddEvent("Container started")
 			if started != nil {
@@ -298,6 +330,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 			}
 		})
 	})
+	close(ended)
 	return exitError(ctx, err)
 }
@@ -429,87 +462,23 @@ func (s *forwardIO) Stderr() io.ReadCloser {
 	return nil
 }
-// procHandle is to track the os process so we can send signals to it.
-type procHandle struct {
-	Process  *os.Process
-	ready    chan struct{}
-	ended    chan struct{}
-	shutdown func()
+// startingProcess is to track the os process so we can send signals to it.
+type startingProcess struct {
+	Process *os.Process
+	ready   chan struct{}
 }
-// runcProcessHandle will create a procHandle that will be monitored, where
-// on ctx.Done the process will be killed. If the kill fails, then the cancel
-// will be called. This is to allow for runc to go through its normal shutdown
-// procedure if the ctx is canceled and to ensure there are no zombie processes
-// left by runc.
-func runcProcessHandle(ctx context.Context, id string) (*procHandle, context.Context) {
-	runcCtx, cancel := context.WithCancel(context.Background())
-	p := &procHandle{
-		ready:    make(chan struct{}),
-		ended:    make(chan struct{}),
-		shutdown: cancel,
-	}
-	// preserve the logger on the context used for the runc process handling
-	runcCtx = bklog.WithLogger(runcCtx, bklog.G(ctx))
-	go func() {
-		// Wait for pid
-		select {
-		case <-ctx.Done():
-			return // nothing to kill
-		case <-p.ready:
-		}
-		for {
-			select {
-			case <-ctx.Done():
-				killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
-				if err := p.Process.Kill(); err != nil {
-					bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err)
-					select {
-					case <-killCtx.Done():
-						timeout()
-						cancel()
-						return
-					default:
-					}
-				}
-				timeout()
-				select {
-				case <-time.After(50 * time.Millisecond):
-				case <-p.ended:
-					return
-				}
-			case <-p.ended:
-				return
-			}
-		}
-	}()
-	return p, runcCtx
-}
-// Release will free resources with a procHandle.
-func (p *procHandle) Release() {
-	close(p.ended)
+// Release will free resources with a startingProcess.
+func (p *startingProcess) Release() {
 	if p.Process != nil {
 		p.Process.Release()
 	}
 }
-// Shutdown should be called after the runc process has exited. This will allow
-// the signal handling and tty resize loops to exit, terminating the
-// goroutines.
-func (p *procHandle) Shutdown() {
-	if p.shutdown != nil {
-		p.shutdown()
-	}
-}
 // WaitForReady will wait until the Process has been populated or the
 // provided context was cancelled. This should be called before using
 // the Process field.
-func (p *procHandle) WaitForReady(ctx context.Context) error {
+func (p *startingProcess) WaitForReady(ctx context.Context) error {
 	select {
 	case <-ctx.Done():
 		return ctx.Err()
@@ -521,7 +490,7 @@ func (p *procHandle) WaitForReady(ctx context.Context) error {
 // WaitForStart will record the pid reported by Runc via the channel.
 // We wait for up to 10s for the runc process to start. If the started
 // callback is non-nil it will be called after receiving the pid.
-func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
+func (p *startingProcess) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
 	startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second)
 	defer timeout()
 	var err error
@@ -546,7 +515,7 @@ func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, sta
 // handleSignals will wait until the runcProcess is ready then will
 // send each signal received on the channel to the process.
-func handleSignals(ctx context.Context, runcProcess *procHandle, signals <-chan syscall.Signal) error {
+func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-chan syscall.Signal) error {
 	if signals == nil {
 		return nil
 	}
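
Note on the executor.go diff above: v0.11.6 reverts the procHandle abstraction introduced in v0.11.5 and restores the inline watchdog in Run. The pattern is worth spelling out: a goroutine waits for ctx cancellation, sends SIGKILL with a bounded per-attempt timeout, and retries every 50ms until the process is observed to exit. A minimal sketch of the same pattern, assuming a kill callback and an ended channel closed once the process exits (killOnCancel is an illustrative name, not from the diff):

package watchdog

import (
	"context"
	"time"
)

// killOnCancel is a hypothetical distillation of the watchdog restored in
// Run: once ctx is cancelled, repeatedly attempt a bounded kill until the
// ended channel reports that the process has exited.
func killOnCancel(ctx context.Context, ended <-chan struct{}, kill func(context.Context) error) {
	for {
		select {
		case <-ctx.Done():
			// Bound each kill attempt so a blocked kill cannot hang cleanup.
			killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
			err := kill(killCtx)
			killTimedOut := killCtx.Err() != nil
			timeout()
			if err != nil && killTimedOut {
				return // the kill itself hung; give up rather than spin
			}
			// Re-check every 50ms until the process is observed to exit.
			select {
			case <-time.After(50 * time.Millisecond):
			case <-ended:
				return
			}
		case <-ended:
			return
		}
	}
}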

vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go

@@ -49,20 +49,23 @@ type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error
 // is only supported for linux, so this really just handles signal propagation
 // to the started runc process.
 func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error {
-	runcProcess, ctx := runcProcessHandle(ctx, id)
+	runcProcess := &startingProcess{
+		ready: make(chan struct{}),
+	}
 	defer runcProcess.Release()
-	eg, ctx := errgroup.WithContext(ctx)
+	var eg errgroup.Group
+	egCtx, cancel := context.WithCancel(ctx)
 	defer eg.Wait()
-	defer runcProcess.Shutdown()
+	defer cancel()
 	startedCh := make(chan int, 1)
 	eg.Go(func() error {
-		return runcProcess.WaitForStart(ctx, startedCh, started)
+		return runcProcess.WaitForStart(egCtx, startedCh, started)
 	})
 	eg.Go(func() error {
-		return handleSignals(ctx, runcProcess, process.Signal)
+		return handleSignals(egCtx, runcProcess, process.Signal)
 	})
 	return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr})
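
The commonCall diff also swaps errgroup.WithContext for a plain errgroup.Group with a manually cancelled child context. The difference matters: WithContext cancels its derived context as soon as the first goroutine returns an error, while the manual variant keeps egCtx alive until the deferred cancel runs when commonCall returns. A small self-contained illustration (standalone example, not from the diff):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// errgroup.WithContext: the derived context is cancelled as soon as
	// any goroutine returns a non-nil error.
	eg, ctx := errgroup.WithContext(context.Background())
	eg.Go(func() error { return errors.New("boom") })
	_ = eg.Wait()
	fmt.Println("WithContext cancelled:", ctx.Err() != nil) // true

	// Plain errgroup.Group + context.WithCancel: cancellation happens only
	// when the caller invokes cancel (deferred in commonCall/callWithIO).
	var eg2 errgroup.Group
	ctx2, cancel := context.WithCancel(context.Background())
	eg2.Go(func() error { return errors.New("boom") })
	_ = eg2.Wait()
	fmt.Println("manual ctx cancelled yet?", ctx2.Err() != nil) // false
	cancel()
}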

vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go

@@ -44,20 +44,23 @@ func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess
 type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error
 func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error {
-	runcProcess, ctx := runcProcessHandle(ctx, id)
+	runcProcess := &startingProcess{
+		ready: make(chan struct{}),
+	}
 	defer runcProcess.Release()
-	eg, ctx := errgroup.WithContext(ctx)
+	var eg errgroup.Group
+	egCtx, cancel := context.WithCancel(ctx)
 	defer eg.Wait()
-	defer runcProcess.Shutdown()
+	defer cancel()
 	startedCh := make(chan int, 1)
 	eg.Go(func() error {
-		return runcProcess.WaitForStart(ctx, startedCh, started)
+		return runcProcess.WaitForStart(egCtx, startedCh, started)
 	})
 	eg.Go(func() error {
-		return handleSignals(ctx, runcProcess, process.Signal)
+		return handleSignals(egCtx, runcProcess, process.Signal)
 	})
 	if !process.Meta.Tty {
@@ -81,7 +84,7 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 		}
 		pts.Close()
 		ptm.Close()
-		runcProcess.Shutdown()
+		cancel() // this will shutdown resize and signal loops
 		err := eg.Wait()
 		if err != nil {
 			bklog.G(ctx).Warningf("error while shutting down tty io: %s", err)
@@ -116,13 +119,13 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 	}
 	eg.Go(func() error {
-		err := runcProcess.WaitForReady(ctx)
+		err := runcProcess.WaitForReady(egCtx)
 		if err != nil {
 			return err
 		}
 		for {
 			select {
-			case <-ctx.Done():
+			case <-egCtx.Done():
 				return nil
 			case resize := <-process.Resize:
 				err = ptm.Resize(console.WinSize{

vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go

@@ -30,9 +30,8 @@ const fileCacheType = "buildkit.file.v0"
 type fileOp struct {
 	op          *pb.FileOp
 	md          cache.MetadataStore
 	w           worker.Worker
-	solver      *FileOpSolver
+	refManager  *file.RefManager
 	numInputs   int
 	parallelism *semaphore.Weighted
 }
@@ -41,12 +40,12 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *s
 	if err := opsutils.Validate(&pb.Op{Op: op}); err != nil {
 		return nil, err
 	}
+	refManager := file.NewRefManager(cm, v.Name())
 	return &fileOp{
 		op:          op.File,
 		md:          cm,
-		numInputs:   len(v.Inputs()),
 		w:           w,
-		solver:      NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm, v.Name())),
+		refManager:  refManager,
+		numInputs:   len(v.Inputs()),
 		parallelism: parallelism,
 	}, nil
 }
@@ -168,7 +167,8 @@ func (f *fileOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
 		inpRefs = append(inpRefs, workerRef.ImmutableRef)
 	}
-	outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions, g)
+	fs := NewFileOpSolver(f.w, &file.Backend{}, f.refManager)
+	outs, err := fs.Solve(ctx, inpRefs, f.op.Actions, g)
 	if err != nil {
 		return nil, err
 	}
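
The file.go diff moves FileOpSolver construction out of NewFileOp and into Exec, so each execution gets a fresh solver while the long-lived op keeps only the shared RefManager. This reads as a fix for shared mutable state: a solver built once in the constructor would be reused by concurrent Exec calls. A hedged sketch of the pattern, with stand-in types rather than the buildkit API:

package fileop

import "context"

// solver stands in for FileOpSolver: it carries per-request mutable state.
type solver struct{ outs map[int]bool }

func newSolver() *solver { return &solver{outs: map[int]bool{}} }

func (s *solver) Solve(ctx context.Context, action int) error {
	s.outs[action] = true // mutation that must not be shared across requests
	return nil
}

// op mirrors the fixed fileOp: it holds no solver, only immutable inputs.
type op struct{}

// Exec builds a fresh solver per call, so concurrent Execs cannot race on
// the solver's internal state.
func (o *op) Exec(ctx context.Context, action int) error {
	return newSolver().Solve(ctx, action)
}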

vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go

@@ -38,6 +38,10 @@ func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor {
 		if !ok {
 			return nil, errors.Errorf("could not find ref %s", p.ID)
 		}
+		if ref == nil {
+			continue
+		}
 		defop, err := llb.NewDefinitionOp(ref.Definition())
 		if err != nil {
 			return nil, err
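
The sbom.go guard covers results whose ref is present in the map but nil, as for scratch, which has no definition to scan; the comma-ok form only proves the key exists, not that the value is non-nil, so without the guard ref.Definition() would panic. A tiny illustration with stand-in types (not the buildkit interfaces):

package main

import "fmt"

type ref interface{ Definition() string }

func main() {
	refs := map[string]ref{"scratch": nil} // key present, value nil
	r, ok := refs["scratch"]
	fmt.Println(ok, r == nil) // true true: ok does not imply non-nil
	if r == nil {
		return // mirrors the added guard; r.Definition() would panic here
	}
	fmt.Println(r.Definition())
}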

vendor/github.com/moby/buildkit/util/resolver/authorizer.go

@@ -356,7 +356,15 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se
 		if resp.ExpiresIn == 0 {
 			resp.ExpiresIn = defaultExpiration
 		}
-		issuedAt, expires = time.Unix(resp.IssuedAt, 0), int(resp.ExpiresIn)
+		expires = int(resp.ExpiresIn)
+		// We later check issuedAt.isZero, which would return
+		// false if converted from zero Unix time. Therefore,
+		// zero time value in response is handled separately
+		if resp.IssuedAt == 0 {
+			issuedAt = time.Time{}
+		} else {
+			issuedAt = time.Unix(resp.IssuedAt, 0)
+		}
 		token = resp.Token
 		return nil, nil
 	}
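
The authorizer.go change encodes a real time-package pitfall: time.Unix(0, 0) is the 1970 epoch, not Go's zero time.Time, so converting an absent IssuedAt through time.Unix would make a later issuedAt.IsZero() check return false. A quick standalone check:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Time{}.IsZero())     // true: Go's zero time (year 1)
	fmt.Println(time.Unix(0, 0).IsZero()) // false: the Unix epoch is a valid instant
}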

vendor/modules.txt

@@ -576,7 +576,7 @@ github.com/mistifyio/go-zfs
 # github.com/mitchellh/hashstructure/v2 v2.0.2
 ## explicit; go 1.14
 github.com/mitchellh/hashstructure/v2
-# github.com/moby/buildkit v0.11.5
+# github.com/moby/buildkit v0.11.6
 ## explicit; go 1.18
 github.com/moby/buildkit/api/services/control
 github.com/moby/buildkit/api/types