vendor: github.com/moby/buildkit v0.13.0-rc2

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
This commit is contained in:
Paweł Gronowski 2024-02-19 16:08:45 +01:00
parent f90b03ee5d
commit 31545c3b67
No known key found for this signature in database
GPG key ID: B85EFCFE26DEF92A
358 changed files with 25180 additions and 4537 deletions

View file

@ -60,7 +60,7 @@ require (
github.com/miekg/dns v1.1.43 github.com/miekg/dns v1.1.43
github.com/mistifyio/go-zfs/v3 v3.0.1 github.com/mistifyio/go-zfs/v3 v3.0.1
github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/copystructure v1.2.0
github.com/moby/buildkit v0.12.5 github.com/moby/buildkit v0.13.0-rc2
github.com/moby/docker-image-spec v1.3.1 github.com/moby/docker-image-spec v1.3.1
github.com/moby/ipvs v1.1.0 github.com/moby/ipvs v1.1.0
github.com/moby/locker v1.0.1 github.com/moby/locker v1.0.1
@ -136,13 +136,14 @@ require (
github.com/cilium/ebpf v0.11.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect
github.com/container-storage-interface/spec v1.5.0 // indirect github.com/container-storage-interface/spec v1.5.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/console v1.0.3 // indirect github.com/containerd/console v1.0.4 // indirect
github.com/containerd/go-cni v1.1.9 // indirect github.com/containerd/go-cni v1.1.9 // indirect
github.com/containerd/go-runc v1.1.0 // indirect github.com/containerd/go-runc v1.1.0 // indirect
github.com/containerd/nydus-snapshotter v0.13.7 // indirect github.com/containerd/nydus-snapshotter v0.13.7 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/ttrpc v1.2.2 // indirect github.com/containerd/ttrpc v1.2.2 // indirect
github.com/containernetworking/cni v1.1.2 // indirect github.com/containernetworking/cni v1.1.2 // indirect
github.com/containernetworking/plugins v1.4.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect
@ -185,7 +186,7 @@ require (
github.com/spdx/tools-golang v0.5.1 // indirect github.com/spdx/tools-golang v0.5.1 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/tinylib/msgp v1.1.8 // indirect github.com/tinylib/msgp v1.1.8 // indirect
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6 // indirect
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 // indirect github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@ -198,9 +199,14 @@ require (
go.etcd.io/etcd/server/v3 v3.5.6 // indirect go.etcd.io/etcd/server/v3 v3.5.6 // indirect
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect

View file

@ -304,8 +304,8 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@ -334,8 +334,8 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk= github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk=
github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE= github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE=
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
@ -347,6 +347,8 @@ github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3H
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
@ -654,8 +656,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
@ -910,8 +913,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= github.com/moby/buildkit v0.13.0-rc2 h1:LWAIkaBIoRTne57NJCnFMdFV30auPia3j9UUZeUc24A=
github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= github.com/moby/buildkit v0.13.0-rc2/go.mod h1:RWPZ1bRcehlF1bjPzj7+wOPZ5cLViAEtx5ZNQWma5/s=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
@ -984,8 +987,8 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -995,8 +998,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@ -1242,8 +1245,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1
github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo= github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo=
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb h1:uUe8rNyVXM8moActoBol6Xf6xX2GMr7SosR2EywMvGg= github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6 h1:v9u6pmdUkarXL/1S/6LGcG9wsiBLd9N/WyJq/Y9WPcg=
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb/go.mod h1:SxX/oNQ/ag6Vaoli547ipFK9J7BZn5JqJG0JE8lf8bA= github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6/go.mod h1:vbbYqJlnswsbJqWUcJN8fKtBhnEgldDrcagTgnBVKKM=
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc= github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc=
github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg= github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg=
github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0=
@ -1353,6 +1356,12 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
@ -1361,11 +1370,15 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqhe
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA=
go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=

View file

@ -1,16 +1,16 @@
linters: linters:
enable: enable:
- structcheck
- varcheck
- staticcheck
- unconvert
- gofmt - gofmt
- goimports - goimports
- golint
- ineffassign - ineffassign
- vet
- unused
- misspell - misspell
- revive
- staticcheck
- structcheck
- unconvert
- unused
- varcheck
- vet
disable: disable:
- errcheck - errcheck

View file

@ -22,8 +22,8 @@ current.Resize(ws)
console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the: As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository. information in our [`containerd/project`](https://github.com/containerd/project) repository.

View file

@ -22,7 +22,10 @@ import (
"os" "os"
) )
var ErrNotAConsole = errors.New("provided file is not a console") var (
ErrNotAConsole = errors.New("provided file is not a console")
ErrNotImplemented = errors.New("not implemented")
)
type File interface { type File interface {
io.ReadWriteCloser io.ReadWriteCloser
@ -45,7 +48,7 @@ type Console interface {
SetRaw() error SetRaw() error
// DisableEcho disables echo on the console // DisableEcho disables echo on the console
DisableEcho() error DisableEcho() error
// Reset restores the console to its orignal state // Reset restores the console to its original state
Reset() error Reset() error
// Size returns the window size of the console // Size returns the window size of the console
Size() (WinSize, error) Size() (WinSize, error)
@ -78,7 +81,7 @@ func Current() (c Console) {
} }
// ConsoleFromFile returns a console using the provided file // ConsoleFromFile returns a console using the provided file
// nolint:golint // nolint:revive
func ConsoleFromFile(f File) (Console, error) { func ConsoleFromFile(f File) (Console, error) {
if err := checkConsole(f); err != nil { if err := checkConsole(f); err != nil {
return nil, err return nil, err

View file

@ -1,3 +1,4 @@
//go:build linux
// +build linux // +build linux
/* /*

36
vendor/github.com/containerd/console/console_other.go generated vendored Normal file
View file

@ -0,0 +1,36 @@
//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
// NewPty creates a new pty pair
// The master is returned as the first console and a string
// with the path to the pty slave is returned as the second
func NewPty() (Console, string, error) {
return nil, "", ErrNotImplemented
}
// checkConsole checks if the provided file is a console
func checkConsole(f File) error {
return ErrNotAConsole
}
func newMaster(f File) (Console, error) {
return nil, ErrNotImplemented
}

View file

@ -1,4 +1,5 @@
// +build darwin freebsd linux netbsd openbsd solaris //go:build darwin || freebsd || linux || netbsd || openbsd || zos
// +build darwin freebsd linux netbsd openbsd zos
/* /*
Copyright The containerd Authors. Copyright The containerd Authors.

View file

@ -24,12 +24,13 @@ import (
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
) )
var ( var vtInputSupported bool
vtInputSupported bool
ErrNotImplemented = errors.New("not implemented")
)
func (m *master) initStdios() { func (m *master) initStdios() {
// Note: We discard console mode warnings, because in/out can be redirected.
//
// TODO: Investigate opening CONOUT$/CONIN$ to handle this correctly
m.in = windows.Handle(os.Stdin.Fd()) m.in = windows.Handle(os.Stdin.Fd())
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
@ -39,8 +40,6 @@ func (m *master) initStdios() {
// Unconditionally set the console mode back even on failure because SetConsoleMode // Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles. // remembers invalid bits on input handles.
windows.SetConsoleMode(m.in, m.inMode) windows.SetConsoleMode(m.in, m.inMode)
} else {
fmt.Printf("failed to get console mode for stdin: %v\n", err)
} }
m.out = windows.Handle(os.Stdout.Fd()) m.out = windows.Handle(os.Stdout.Fd())
@ -50,8 +49,6 @@ func (m *master) initStdios() {
} else { } else {
windows.SetConsoleMode(m.out, m.outMode) windows.SetConsoleMode(m.out, m.outMode)
} }
} else {
fmt.Printf("failed to get console mode for stdout: %v\n", err)
} }
m.err = windows.Handle(os.Stderr.Fd()) m.err = windows.Handle(os.Stderr.Fd())
@ -61,8 +58,6 @@ func (m *master) initStdios() {
} else { } else {
windows.SetConsoleMode(m.err, m.errMode) windows.SetConsoleMode(m.err, m.errMode)
} }
} else {
fmt.Printf("failed to get console mode for stderr: %v\n", err)
} }
} }
@ -94,6 +89,8 @@ func (m *master) SetRaw() error {
} }
func (m *master) Reset() error { func (m *master) Reset() error {
var errs []error
for _, s := range []struct { for _, s := range []struct {
fd windows.Handle fd windows.Handle
mode uint32 mode uint32
@ -103,10 +100,16 @@ func (m *master) Reset() error {
{m.err, m.errMode}, {m.err, m.errMode},
} { } {
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
return fmt.Errorf("unable to restore console mode: %w", err) // we can't just abort on the first error, otherwise we might leave
// the console in an unexpected state.
errs = append(errs, fmt.Errorf("unable to restore console mode: %w", err))
} }
} }
if len(errs) > 0 {
return errs[0]
}
return nil return nil
} }

View file

@ -1,163 +0,0 @@
// +build zos
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"golang.org/x/sys/unix"
)
// NewPty creates a new pty pair
// The master is returned as the first console and a string
// with the path to the pty slave is returned as the second
func NewPty() (Console, string, error) {
var f File
var err error
var slave string
for i := 0;; i++ {
ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
if err == nil {
slave = fmt.Sprintf("/dev/ttyp%04d", i)
break
}
if os.IsNotExist(err) {
return nil, "", err
}
// else probably Resource Busy
}
m, err := newMaster(f)
if err != nil {
return nil, "", err
}
return m, slave, nil
}
type master struct {
f File
original *unix.Termios
}
func (m *master) Read(b []byte) (int, error) {
return m.f.Read(b)
}
func (m *master) Write(b []byte) (int, error) {
return m.f.Write(b)
}
func (m *master) Close() error {
return m.f.Close()
}
func (m *master) Resize(ws WinSize) error {
return tcswinsz(m.f.Fd(), ws)
}
func (m *master) ResizeFrom(c Console) error {
ws, err := c.Size()
if err != nil {
return err
}
return m.Resize(ws)
}
func (m *master) Reset() error {
if m.original == nil {
return nil
}
return tcset(m.f.Fd(), m.original)
}
func (m *master) getCurrent() (unix.Termios, error) {
var termios unix.Termios
if err := tcget(m.f.Fd(), &termios); err != nil {
return unix.Termios{}, err
}
return termios, nil
}
func (m *master) SetRaw() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState = cfmakeraw(rawState)
rawState.Oflag = rawState.Oflag | unix.OPOST
return tcset(m.f.Fd(), &rawState)
}
// DisableEcho clears the ECHO flag in the terminal's local modes,
// leaving all other attributes unchanged.
func (m *master) DisableEcho() error {
	state, err := m.getCurrent()
	if err != nil {
		return err
	}
	state.Lflag &^= unix.ECHO
	return tcset(m.f.Fd(), &state)
}
// Size returns the current window size of the pty.
func (m *master) Size() (WinSize, error) {
	ws, err := tcgwinsz(m.f.Fd())
	return ws, err
}

// Fd returns the file descriptor of the master file.
func (m *master) Fd() uintptr {
	fd := m.f.Fd()
	return fd
}

// Name returns the file name of the master file.
func (m *master) Name() string {
	name := m.f.Name()
	return name
}
// checkConsole checks if the provided file is a console by probing its
// termios attributes; a failed probe yields ErrNotAConsole.
func checkConsole(f File) error {
	var t unix.Termios
	err := tcget(f.Fd(), &t)
	if err != nil {
		return ErrNotAConsole
	}
	return nil
}
// newMaster wraps f in a master console, capturing the terminal's
// current attributes so that Reset can restore them later.
func newMaster(f File) (Console, error) {
	m := &master{f: f}
	current, err := m.getCurrent()
	if err != nil {
		return nil, err
	}
	m.original = &current
	return m, nil
}
// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts normally. In particular, a not-very-well-known default of
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
// problem for terminal emulators, because we relay data from the terminal we
// also relay that funky line discipline.
func ClearONLCR(fd uintptr) error {
	return setONLCR(fd, false)
}

// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts as intended for a terminal emulator.
// It is the inverse of ClearONLCR.
func SetONLCR(fd uintptr) error {
	return setONLCR(fd, true)
}

View file

@ -1,3 +1,4 @@
//go:build freebsd && cgo
// +build freebsd,cgo // +build freebsd,cgo
/* /*

View file

@ -1,3 +1,4 @@
//go:build freebsd && !cgo
// +build freebsd,!cgo // +build freebsd,!cgo
/* /*

View file

@ -1,4 +1,5 @@
// +build darwin linux netbsd openbsd solaris //go:build darwin || linux || netbsd || openbsd
// +build darwin linux netbsd openbsd
/* /*
Copyright The containerd Authors. Copyright The containerd Authors.

43
vendor/github.com/containerd/console/pty_zos.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
//go:build zos
// +build zos
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
)
// openpt allocates a new pseudo-terminal by opening the first available /dev/ptypXX device.
// A "does not exist" error means the candidate list is exhausted; any
// other error (typically Resource Busy) moves on to the next device.
func openpt() (*os.File, error) {
	for i := 0; ; i++ {
		name := fmt.Sprintf("/dev/ptyp%04d", i)
		f, err := os.OpenFile(name, os.O_RDWR, 0600)
		if err == nil {
			return f, nil
		}
		if os.IsNotExist(err) {
			return nil, err
		}
		// else probably Resource Busy; keep probing
	}
}

View file

@ -1,3 +1,4 @@
//go:build freebsd && cgo
// +build freebsd,cgo // +build freebsd,cgo
/* /*

View file

@ -1,3 +1,4 @@
//go:build freebsd && !cgo
// +build freebsd,!cgo // +build freebsd,!cgo
/* /*

View file

@ -1,3 +1,4 @@
//go:build openbsd && cgo
// +build openbsd,cgo // +build openbsd,cgo
/* /*

View file

@ -1,3 +1,4 @@
//go:build openbsd && !cgo
// +build openbsd,!cgo // +build openbsd,!cgo
/* /*

View file

@ -1,51 +0,0 @@
// +build solaris,cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"os"
"golang.org/x/sys/unix"
)
//#include <stdlib.h>
import "C"
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
	// C.ptsname may return a pointer into static storage, so convert it
	// to a Go string immediately before any subsequent call can
	// overwrite it.
	ptspath, err := C.ptsname(C.int(f.Fd()))
	if err != nil {
		return "", err
	}
	return C.GoString(ptspath), nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
	// NOTE(review): despite the function's name, only grantpt(3C) is
	// invoked here (granting slave access rights); unlockpt(3C) itself
	// is never called. Confirm against upstream whether that is
	// intentional on Solaris.
	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
		return err
	}
	return nil
}

View file

@ -1,47 +0,0 @@
// +build solaris,!cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// Implementing the functions below requires cgo support. Non-cgo stubs
// versions are defined below to enable cross-compilation of source code
// that depends on these functions, but the resultant cross-compiled
// binaries cannot actually be used. If the stub function(s) below are
// actually invoked they will display an error message and cause the
// calling process to exit.
//
package console
import (
"os"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
// ptsname is a non-cgo stub that exists only so cross-compilation
// succeeds; invoking it aborts the process.
func ptsname(f *os.File) (string, error) {
	panic("ptsname() support requires cgo.")
}

// unlockpt is a non-cgo stub that exists only so cross-compilation
// succeeds; invoking it aborts the process.
func unlockpt(f *os.File) error {
	panic("unlockpt() support requires cgo.")
}

View file

@ -1,4 +1,5 @@
// +build darwin freebsd linux netbsd openbsd solaris zos //go:build darwin || freebsd || linux || netbsd || openbsd || zos
// +build darwin freebsd linux netbsd openbsd zos
/* /*
Copyright The containerd Authors. Copyright The containerd Authors.
@ -83,7 +84,7 @@ func cfmakeraw(t unix.Termios) unix.Termios {
t.Oflag &^= unix.OPOST t.Oflag &^= unix.OPOST
t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
t.Cflag &^= (unix.CSIZE | unix.PARENB) t.Cflag &^= (unix.CSIZE | unix.PARENB)
t.Cflag &^= unix.CS8 t.Cflag |= unix.CS8
t.Cc[unix.VMIN] = 1 t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0 t.Cc[unix.VTIME] = 0

View file

@ -17,6 +17,9 @@
package console package console
import ( import (
"os"
"strings"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -24,3 +27,13 @@ const (
cmdTcGet = unix.TCGETS cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS cmdTcSet = unix.TCSETS
) )
// unlockpt is a no-op on zos.
func unlockpt(_ *os.File) error {
	return nil
}

// ptsname retrieves the name of the first available pts for the given master.
// The slave path is derived purely from the master's name: a
// /dev/ptypNNNN master pairs with the /dev/ttypNNNN slave.
func ptsname(f *os.File) (string, error) {
	return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil
}

View file

@ -436,9 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err != nil { if err != nil {
if err == io.EOF { if err == io.EOF {
break break
} else {
return nil, fmt.Errorf("failed to parse tar file, %w", err)
} }
return nil, fmt.Errorf("failed to parse tar file, %w", err)
} }
switch cleanEntryName(h.Name) { switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark: case PrefetchLandmark, NoPrefetchLandmark:

201
vendor/github.com/containernetworking/plugins/LICENSE generated vendored Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,41 @@
### Namespaces, Threads, and Go
On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
### Namespace Switching
Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
### Do() The Recommended Thing
The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
```go
err = targetNs.Do(func(hostNs ns.NetNS) error {
dummy := &netlink.Dummy{
LinkAttrs: netlink.LinkAttrs{
Name: "dummy0",
},
}
return netlink.LinkAdd(dummy)
})
```
Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem.
When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater.
### Creating network namespaces
Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration.
### Further Reading
- https://github.com/golang/go/wiki/LockOSThread
- http://morsmachine.dk/go-scheduler
- https://github.com/containernetworking/cni/issues/262
- https://golang.org/pkg/runtime/
- https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix

View file

@ -0,0 +1,234 @@
// Copyright 2015-2017 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ns
import (
"fmt"
"os"
"runtime"
"sync"
"syscall"
"golang.org/x/sys/unix"
)
// GetCurrentNS returns an object representing the current OS thread's
// network namespace.
func GetCurrentNS() (NetNS, error) {
	// Lock the thread in case another goroutine executes in it and
	// changes its network namespace after getCurrentThreadNetNSPath(),
	// otherwise it might return an unexpected network namespace. The
	// lock must be taken before the path is computed and held until
	// GetNS has opened the file.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	return GetNS(getCurrentThreadNetNSPath())
}
// getCurrentThreadNetNSPath returns the nsfs path for the calling OS
// thread's network namespace.
func getCurrentThreadNetNSPath() string {
	// /proc/self/ns/net returns the namespace of the main thread, not
	// of whatever thread this goroutine is running on. Make sure we
	// use the thread's net namespace since the thread is switching around
	pid := os.Getpid()
	tid := unix.Gettid()
	return fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)
}
// Close releases the file backing this namespace handle and marks the
// handle as unusable for further operations.
func (ns *netNS) Close() error {
	if err := ns.errorIfClosed(); err != nil {
		return err
	}
	err := ns.file.Close()
	if err != nil {
		return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
	}
	ns.closed = true
	return nil
}
// Set switches the calling OS thread into this network namespace.
func (ns *netNS) Set() error {
	if err := ns.errorIfClosed(); err != nil {
		return err
	}
	err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET)
	if err != nil {
		return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
	}
	return nil
}
// NetNS is a handle to a network namespace, backed by an open file
// under /proc/<pid>/task/<tid>/ns/net (or an nsfs bind mount).
type NetNS interface {
	// Executes the passed closure in this object's network namespace,
	// attempting to restore the original namespace before returning.
	// However, since each OS thread can have a different network namespace,
	// and Go's thread scheduling is highly variable, callers cannot
	// guarantee any specific namespace is set unless operations that
	// require that namespace are wrapped with Do(). Also, no code called
	// from Do() should call runtime.UnlockOSThread(), or the risk
	// of executing code in an incorrect namespace will be greater. See
	// https://github.com/golang/go/wiki/LockOSThread for further details.
	Do(toRun func(NetNS) error) error
	// Sets the current network namespace to this object's network namespace.
	// Note that since Go's thread scheduling is highly variable, callers
	// cannot guarantee the requested namespace will be the current namespace
	// after this function is called; to ensure this wrap operations that
	// require the namespace with Do() instead.
	Set() error
	// Returns the filesystem path representing this object's network namespace
	Path() string
	// Returns a file descriptor representing this object's network namespace
	Fd() uintptr
	// Cleans up this instance of the network namespace; if this instance
	// is the last user the namespace will be destroyed
	Close() error
}

// netNS is the file-backed implementation of NetNS. closed guards
// against use after Close.
type netNS struct {
	file *os.File
	closed bool
}

// netNS implements the NetNS interface
var _ NetNS = &netNS{}
const (
	// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
	NSFS_MAGIC = unix.NSFS_MAGIC
	PROCFS_MAGIC = unix.PROC_SUPER_MAGIC
)

// NSPathNotExistErr is returned by IsNSorErr when the candidate path
// does not exist at all.
type NSPathNotExistErr struct{ msg string }

func (e NSPathNotExistErr) Error() string { return e.msg }

// NSPathNotNSErr is returned by IsNSorErr when the path exists but does
// not live on a procfs/nsfs filesystem, i.e. is not a namespace file.
type NSPathNotNSErr struct{ msg string }

func (e NSPathNotNSErr) Error() string { return e.msg }
// IsNSorErr returns nil when nspath refers to a network namespace file
// (a file on procfs or nsfs), and a descriptive error otherwise.
func IsNSorErr(nspath string) error {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(nspath, &stat); err != nil {
		if os.IsNotExist(err) {
			return NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
		}
		return fmt.Errorf("failed to Statfs %q: %v", nspath, err)
	}
	// Namespace files live on nsfs; older kernels report procfs.
	if stat.Type == PROCFS_MAGIC || stat.Type == NSFS_MAGIC {
		return nil
	}
	return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
}
// GetNS returns an object representing the namespace referred to by @path
func GetNS(nspath string) (NetNS, error) {
	if err := IsNSorErr(nspath); err != nil {
		return nil, err
	}
	fd, err := os.Open(nspath)
	if err != nil {
		return nil, err
	}
	return &netNS{file: fd}, nil
}
// Path returns the filesystem path backing this namespace handle.
func (ns *netNS) Path() string {
	p := ns.file.Name()
	return p
}

// Fd returns the file descriptor backing this namespace handle.
func (ns *netNS) Fd() uintptr {
	fd := ns.file.Fd()
	return fd
}

// errorIfClosed guards namespace operations against use after Close.
func (ns *netNS) errorIfClosed() error {
	if !ns.closed {
		return nil
	}
	return fmt.Errorf("%q has already been closed", ns.file.Name())
}
// Do executes toRun inside this network namespace on a freshly spawned,
// thread-locked goroutine, then restores the thread's original
// namespace. toRun receives a handle to the caller's (host) namespace.
// The order of operations below — lock thread, snapshot thread netns,
// switch, run, switch back, unlock only on success — is load-bearing;
// do not reorder.
func (ns *netNS) Do(toRun func(NetNS) error) error {
	if err := ns.errorIfClosed(); err != nil {
		return err
	}
	containedCall := func(hostNS NetNS) error {
		// Snapshot the executing thread's own namespace (not the
		// process-wide one) so we can restore it afterwards.
		threadNS, err := GetCurrentNS()
		if err != nil {
			return fmt.Errorf("failed to open current netns: %v", err)
		}
		defer threadNS.Close()
		// switch to target namespace
		if err = ns.Set(); err != nil {
			return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
		}
		defer func() {
			err := threadNS.Set() // switch back
			if err == nil {
				// Unlock the current thread only when we successfully switched back
				// to the original namespace; otherwise leave the thread locked which
				// will force the runtime to scrap the current thread, that is maybe
				// not as optimal but at least always safe to do.
				runtime.UnlockOSThread()
			}
		}()
		return toRun(hostNS)
	}
	// save a handle to current network namespace
	hostNS, err := GetCurrentNS()
	if err != nil {
		return fmt.Errorf("Failed to open current namespace: %v", err)
	}
	defer hostNS.Close()
	var wg sync.WaitGroup
	wg.Add(1)
	// Start the callback in a new green thread so that if we later fail
	// to switch the namespace back to the original one, we can safely
	// leave the thread locked to die without a risk of the current thread
	// left lingering with incorrect namespace.
	var innerError error
	go func() {
		defer wg.Done()
		runtime.LockOSThread()
		innerError = containedCall(hostNS)
	}()
	wg.Wait()
	return innerError
}
// WithNetNSPath executes the passed closure under the given network
// namespace, restoring the original namespace afterwards.
func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
	netns, err := GetNS(nspath)
	if err != nil {
		return err
	}
	defer netns.Close()
	return netns.Do(toRun)
}

View file

@ -1,34 +0,0 @@
package reference
import "github.com/distribution/reference"
// IsNameOnly returns true if reference only contains a repo name.
//
// Deprecated: use [reference.IsNameOnly].
func IsNameOnly(ref reference.Named) bool {
	nameOnly := reference.IsNameOnly(ref)
	return nameOnly
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
//
// Deprecated: use [reference.FamiliarName].
func FamiliarName(ref reference.Named) string {
	name := reference.FamiliarName(ref)
	return name
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
//
// Deprecated: use [reference.FamiliarString].
func FamiliarString(ref reference.Reference) string {
	s := reference.FamiliarString(ref)
	return s
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See [path.Match] for supported patterns.
//
// Deprecated: use [reference.FamiliarMatch].
func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) {
	matched, err := reference.FamiliarMatch(pattern, ref)
	return matched, err
}

View file

@ -1,92 +0,0 @@
package reference
import (
"regexp"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/go-digest/digestset"
)
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
//
// Deprecated: use [reference.ParseNormalizedNamed].
func ParseNormalizedNamed(s string) (reference.Named, error) {
	named, err := reference.ParseNormalizedNamed(s)
	return named, err
}

// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest.
//
// Deprecated: use [reference.ParseDockerRef].
func ParseDockerRef(ref string) (reference.Named, error) {
	named, err := reference.ParseDockerRef(ref)
	return named, err
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
//
// Deprecated: use [reference.TagNameOnly].
func TagNameOnly(ref reference.Named) reference.Named {
	tagged := reference.TagNameOnly(ref)
	return tagged
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
//
// Deprecated: use [reference.ParseAnyReference].
func ParseAnyReference(ref string) (reference.Reference, error) {
	parsed, err := reference.ParseAnyReference(ref)
	return parsed, err
}
// Functions and types below have been removed in distribution v3 and
// have not been ported to github.com/distribution/reference. See
// https://github.com/distribution/distribution/pull/3774
var (
	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	//
	// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
	ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier)

	// shortIdentifier matches 6 to 64 lowercase hex characters.
	shortIdentifier = `([a-f0-9]{6,64})`

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`)
)

// digestReference adapts a digest.Digest to the Reference interface so
// it can be returned from ParseAnyReferenceWithSet.
type digestReference digest.Digest

// String returns the digest in its canonical string form.
func (d digestReference) String() string {
	return digest.Digest(d).String()
}

// Digest returns the underlying digest value.
func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
//
// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	// Short-identifier prefixes are resolved through the digest set;
	// anything else is tried as a full digest. On any failure we fall
	// through to normalized-name parsing.
	if anchoredShortIdentifierRegexp.MatchString(ref) {
		if dgst, err := ds.Lookup(ref); err == nil {
			return digestReference(dgst), nil
		}
	} else if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}
	return reference.ParseNormalizedNamed(ref)
}

View file

@ -1,172 +0,0 @@
// Package reference is deprecated, and has moved to github.com/distribution/reference.
//
// Deprecated: use github.com/distribution/reference instead.
package reference
import (
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
)
// NameTotalLengthMax is the maximum total number of characters in a repository name.
//
// Deprecated: use [reference.NameTotalLengthMax].
const NameTotalLengthMax = reference.NameTotalLengthMax
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
//
// Deprecated: use [reference.ErrReferenceInvalidFormat].
var ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat

// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
//
// Deprecated: use [reference.ErrTagInvalidFormat].
var ErrTagInvalidFormat = reference.ErrTagInvalidFormat

// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
//
// Deprecated: use [reference.ErrDigestInvalidFormat].
var ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat

// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
//
// Deprecated: use [reference.ErrNameContainsUppercase].
var ErrNameContainsUppercase = reference.ErrNameContainsUppercase

// ErrNameEmpty is returned for empty, invalid repository names.
//
// Deprecated: use [reference.ErrNameEmpty].
var ErrNameEmpty = reference.ErrNameEmpty

// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
//
// Deprecated: use [reference.ErrNameTooLong].
var ErrNameTooLong = reference.ErrNameTooLong

// ErrNameNotCanonical is returned when a name is not canonical.
//
// Deprecated: use [reference.ErrNameNotCanonical].
var ErrNameNotCanonical = reference.ErrNameNotCanonical
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
//
// Deprecated: use [reference.Reference].
type Reference = reference.Reference

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
//
// Deprecated: use [reference.Field].
type Field = reference.Field

// AsField wraps a reference in a Field for encoding.
//
// Deprecated: use [reference.AsField].
func AsField(ref reference.Reference) reference.Field {
	return reference.AsField(ref)
}

// Named is an object with a full name
//
// Deprecated: use [reference.Named].
type Named = reference.Named

// Tagged is an object which has a tag
//
// Deprecated: use [reference.Tagged].
type Tagged = reference.Tagged

// NamedTagged is an object including a name and tag.
//
// NOTE: declared as an alias (not a new defined type) so that it is
// type-identical to the upstream interface, matching Named and Tagged above.
//
// Deprecated: use [reference.NamedTagged].
type NamedTagged = reference.NamedTagged

// Digested is an object which has a digest
// in which it can be referenced by
//
// Deprecated: use [reference.Digested].
type Digested = reference.Digested

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
//
// Deprecated: use [reference.Canonical].
type Canonical = reference.Canonical
// Domain returns the domain part of the [Named] reference.
//
// Deprecated: use [reference.Domain].
func Domain(named reference.Named) string {
	domain := reference.Domain(named)
	return domain
}
// Path returns the name without the domain part of the [Named] reference.
//
// Deprecated: use [reference.Path].
func Path(named reference.Named) (name string) {
	name = reference.Path(named)
	return name
}
// SplitHostname splits a named reference into a hostname and a name string.
// When no valid hostname is found, the hostname is empty and the full value
// is returned as the name.
//
// Deprecated: Use [reference.Domain] or [reference.Path].
func SplitHostname(named reference.Named) (string, string) {
	hostname, remainder := reference.SplitHostname(named)
	return hostname, remainder
}
// Parse parses s and returns a syntactically valid Reference. On failure it
// returns the error along with a nil Reference.
//
// Deprecated: use [reference.Parse].
func Parse(s string) (reference.Reference, error) {
	parsed, err := reference.Parse(s)
	return parsed, err
}
// ParseNamed parses s and returns a syntactically valid reference that
// implements the Named interface. The reference must have a name and be in
// canonical form; otherwise an error is returned along with a nil Reference.
//
// Deprecated: use [reference.ParseNamed].
func ParseNamed(s string) (reference.Named, error) {
	named, err := reference.ParseNamed(s)
	return named, err
}
// WithName returns a named object for the given string. If the input is
// invalid, ErrReferenceInvalidFormat is returned.
//
// Deprecated: use [reference.WithName].
func WithName(name string) (reference.Named, error) {
	named, err := reference.WithName(name)
	return named, err
}
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both.
//
// Deprecated: use [reference.WithTag].
func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) {
	tagged, err := reference.WithTag(name, tag)
	return tagged, err
}
// WithDigest combines the name from "name" and the digest from "digest" to
// form a reference incorporating both.
//
// Deprecated: use [reference.WithDigest].
func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) {
	canonical, err := reference.WithDigest(name, digest)
	return canonical, err
}
// TrimNamed strips any tag or digest from the named reference.
//
// Deprecated: use [reference.TrimNamed].
func TrimNamed(ref reference.Named) reference.Named {
	trimmed := reference.TrimNamed(ref)
	return trimmed
}

View file

@ -1,50 +0,0 @@
package reference
import (
"github.com/distribution/reference"
)
var (
	// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
	//
	// Deprecated: use [reference.DigestRegexp].
	DigestRegexp = reference.DigestRegexp

	// DomainRegexp matches hostname or IP-addresses, optionally including a port
	// number. It defines the structure of potential domain components that may be
	// part of image names. This is purposely a subset of what is allowed by DNS to
	// ensure backwards compatibility with Docker image names. It may be a subset of
	// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
	// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
	// addresses such as IPv4-Mapped).
	//
	// Deprecated: use [reference.DomainRegexp].
	//
	// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
	//
	// BUG FIX: this was previously assigned reference.DigestRegexp (copy-paste
	// error), which made DomainRegexp match digests instead of domains.
	DomainRegexp = reference.DomainRegexp

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	//
	// Deprecated: use [reference.IdentifierRegexp].
	IdentifierRegexp = reference.IdentifierRegexp

	// NameRegexp is the format for the name component of references, including
	// an optional domain and port, but without tag or digest suffix.
	//
	// Deprecated: use [reference.NameRegexp].
	NameRegexp = reference.NameRegexp

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	//
	// Deprecated: use [reference.ReferenceRegexp].
	ReferenceRegexp = reference.ReferenceRegexp

	// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
	//
	// Deprecated: use [reference.TagRegexp].
	//
	// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
	TagRegexp = reference.TagRegexp
)

View file

@ -1,10 +0,0 @@
package reference
import "github.com/distribution/reference"
// Sort sorts string references, preferring higher-information references.
//
// Deprecated: use [reference.Sort].
func Sort(references []string) []string {
	sorted := reference.Sort(references)
	return sorted
}

View file

@ -1,66 +1,284 @@
# This file lists all individuals having contributed content to the repository. # This file lists all individuals having contributed content to the repository.
# For how it is generated, see `scripts/generate-authors.sh`. # For how it is generated, see hack/dockerfiles/authors.Dockerfile.
a-palchikov <deemok@gmail.com>
Aaron L. Xu <likexu@harmonycloud.cn> Aaron L. Xu <likexu@harmonycloud.cn>
Aaron Lehmann <aaron.lehmann@docker.com> Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Lehmann <alehmann@netflix.com>
Abdur Rehman <abdur_rehman@mentor.com>
Addam Hardy <addam.hardy@gmail.com>
Adrian Plata <adrian.plata@docker.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Alan Fregtman <941331+darkvertex@users.noreply.github.com>
Alex Couture-Beil <alex@earthly.dev>
Alex Mayer <amayer5125@gmail.com>
Alex Suraci <suraci.alex@gmail.com>
Alexander Morozov <lk4d4@docker.com> Alexander Morozov <lk4d4@docker.com>
Alexis Murzeau <amubtdx@gmail.com>
Alice Frosi <afrosi@de.ibm.com> Alice Frosi <afrosi@de.ibm.com>
Allen Sun <allen.sun@daocloud.io> Allen Sun <allen.sun@daocloud.io>
Amen Belayneh <amenbelayneh@gmail.com>
Anca Iordache <anca.iordache@docker.com>
Anda Xu <anda.xu@docker.com> Anda Xu <anda.xu@docker.com>
Anders F Björklund <anders.f.bjorklund@gmail.com>
Andrea Bolognani <abologna@redhat.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andrew Chang <chang331006@gmail.com>
Andrey Smirnov <smirnov.andrey@gmail.com>
Andy Alt <andy5995@users.noreply.github.com>
Andy Caldwell <andrew.caldwell@metaswitch.com>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anthony Sottile <asottile@umich.edu> Anthony Sottile <asottile@umich.edu>
Anurag Goel <anurag@render.com>
Anusha Ragunathan <anusha@docker.com>
Arnaud Bailly <arnaud.oqube@gmail.com> Arnaud Bailly <arnaud.oqube@gmail.com>
Avi Deitcher <avi@deitcher.net>
Bastiaan Bakker <bbakker@xebia.com>
Ben Longo <benlongo9807@gmail.com>
Bertrand Paquet <bertrand.paquet@gmail.com>
Bin Liu <liubin0329@gmail.com> Bin Liu <liubin0329@gmail.com>
Brandon Mitchell <git@bmitch.net>
Brian Goff <cpuguy83@gmail.com> Brian Goff <cpuguy83@gmail.com>
Ce Gao <ce.gao@outlook.com>
Chaerim Yeo <yeochaerim@gmail.com>
Changwei Ge <gechangwei@bytedance.com>
Chanhun Jeong <chanhun.jeong@navercorp.com>
ChaosGramer <ChaosGramer@users.noreply.github.com>
Charles Chan <charleswhchan@users.noreply.github.com>
Charles Korn <me@charleskorn.com>
Charles Law <claw@conduce.com>
Chenbin <chen.bin11@zte.com.cn>
Chris Goller <goller@gmail.com>
Chris McKinnel <chrismckinnel@gmail.com>
Christian Höltje <docwhat@gerf.org>
Christian Weichel <chris@gitpod.io>
Ciro S. Costa <cscosta@pivotal.io>
Claudiu Belu <cbelu@cloudbasesolutions.com>
Colin Chartier <colin.n.chartier@gmail.com>
Corey Larson <corey@earthly.dev>
Cory Bennett <cbennett@netflix.com>
Cory Snider <csnider@mirantis.com>
coryb <cbennett@netflix.com>
CrazyMax <github@crazymax.dev>
Csaba Apagyi <csaba.apagyi@gmail.com>
Dan Duvall <dduvall@wikimedia.org>
Daniel Cassidy <mail@danielcassidy.me.uk>
Daniel Nephin <dnephin@gmail.com> Daniel Nephin <dnephin@gmail.com>
Darren Shepherd <darren@rancher.com>
Dave Chen <dave.chen@arm.com> Dave Chen <dave.chen@arm.com>
Dave Henderson <dhenderson@gmail.com>
Dave Tucker <dt@docker.com>
David Calavera <david.calavera@gmail.com> David Calavera <david.calavera@gmail.com>
David Dooling <dooling@gmail.com>
David Gageot <david.gageot@docker.com>
David Karlsson <david.karlsson@docker.com>
Davis Schirmer <djds@bghost.xyz>
Dennis Chen <dennis.chen@arm.com> Dennis Chen <dennis.chen@arm.com>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Derek McGowan <derek@mcgstyle.net> Derek McGowan <derek@mcgstyle.net>
Dharmit Shah <shahdharmit@gmail.com>
Ding Fei <dingfei@stars.org.cn>
dito <itodaisuke00@gmail.com>
Doug Davis <dug@us.ibm.com> Doug Davis <dug@us.ibm.com>
Edgar Lee <edgarl@netflix.com> Edgar Lee <edgarhinshunlee@gmail.com>
Eli Uriegas <eli.uriegas@docker.com> Eli Uriegas <eli.uriegas@docker.com>
Elias Faxö <elias.faxo@tre.se>
Eng Zer Jun <engzerjun@gmail.com>
Eric Engestrom <eric@engestrom.ch>
Erik Sipsma <erik@sipsma.dev>
eyherabh <hugogabriel.eyherabide@gmail.com>
f0 <f0@users.noreply.github.com> f0 <f0@users.noreply.github.com>
Fernando Miguel <github@FernandoMiguel.net> Fernando Miguel <github@FernandoMiguel.net>
Fiona Klute <fiona.klute@gmx.de>
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
Fred Cox <mcfedr@gmail.com>
Frieder Bluemle <frieder.bluemle@gmail.com>
Gabriel <samfiragabriel@gmail.com>
Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
Gaetan de Villele <gdevillele@gmail.com>
Gahl Saraf <saraf.gahl@gmail.com>
genglu.gl <luzigeng32@163.com>
George <george@betterde.com>
ggjulio <juligonz@student.42.fr>
Govind Rai <raigovind93@gmail.com>
Grant Reaber <grant.reaber@gmail.com>
Guilhem C <guilhem.charles@gmail.com>
Hans van den Bogert <hansbogert@gmail.com>
Hao Hu <hao.hu.fr@gmail.com> Hao Hu <hao.hu.fr@gmail.com>
Hector S <hfsam88@gmail.com>
Helen Xie <chenjg@harmonycloud.cn> Helen Xie <chenjg@harmonycloud.cn>
Himanshu Pandey <hpandey@pivotal.io> Himanshu Pandey <hpandey@pivotal.io>
Hiromu Nakamura <abctail30@gmail.com> Hiromu Nakamura <abctail30@gmail.com>
HowJMay <vulxj0j8j8@gmail.com>
Hugo Santos <hugo@namespacelabs.com>
Ian Campbell <ijc@docker.com> Ian Campbell <ijc@docker.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Jacob Gillespie <jacobwgillespie@gmail.com>
Jacob MacElroy <jacob@okteto.com>
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr> Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
Jeffrey Huang <jeffreyhuang23@gmail.com>
Jesse Rittner <rittneje@gmail.com>
Jessica Frazelle <acidburn@microsoft.com> Jessica Frazelle <acidburn@microsoft.com>
jgeiger <jgeiger@gmail.com>
Jitender Kumar <jitender.kumar@intel.com>
jlecordier <jeanlecordier@hotmail.fr>
joey <zchengjoey@gmail.com>
John Howard <jhoward@microsoft.com> John Howard <jhoward@microsoft.com>
John Maguire <jmaguire@duosecurity.com>
John Mulhausen <john@docker.com>
John Tims <john.k.tims@gmail.com>
Jon Zeolla <zeolla@gmail.com>
Jonathan Azoff <azoff@users.noreply.github.com>
Jonathan Giannuzzi <jonathan@giannuzzi.me>
Jonathan Stoppani <jonathan.stoppani@divio.com> Jonathan Stoppani <jonathan.stoppani@divio.com>
Jonny Stoten <jonny.stoten@docker.com>
JordanGoasdoue <jordan.goasdoue@dailymotion.com>
jroenf <jeroenfranse@gmail.com>
Julian Goede <julian.goede@pm.me>
Justas Brazauskas <brazauskasjustas@gmail.com> Justas Brazauskas <brazauskasjustas@gmail.com>
Justin Chadwell <me@jedevc.com>
Justin Cormack <justin.cormack@docker.com> Justin Cormack <justin.cormack@docker.com>
Justin Garrison <justin@linux.com>
Jörg Franke <359489+NewJorg@users.noreply.github.com>
Kang, Matthew <impulsecss@gmail.com>
Kees Cook <keescook@chromium.org>
Kevin Burke <kev@inburke.com>
kevinmeredith <kevin.m.meredith@gmail.com>
Kir Kolyshkin <kolyshkin@gmail.com>
Kohei Tokunaga <ktokunaga.mail@gmail.com>
Koichi Shiraishi <zchee.io@gmail.com>
Kris-Mikael Krister <krismikael@protonmail.com>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
Kyle <Kylemit@gmail.com>
l00397676 <lujingxiao@huawei.com>
Lajos Papp <lalyos@yahoo.com> Lajos Papp <lalyos@yahoo.com>
lalyos <lalyos@yahoo.com>
Levi Harrison <levisamuelharrison@gmail.com>
liwenqi <vikilwq@zju.edu.cn>
lixiaobing10051267 <li.xiaobing1@zte.com.cn>
lomot <lomot@qq.com>
Lu Jingxiao <lujingxiao@huawei.com>
Luca Visentin <luck.visentin@gmail.com>
Maciej Kalisz <mdkalish@users.noreply.github.com>
Madhav Puri <madhav.puri@gmail.com>
Manu Gupta <manugupt1@gmail.com>
Marcus Comstedt <marcus@mc.pp.se>
Mark Gordon <msg555@gmail.com>
Marko Kohtala <marko.kohtala@gmail.com>
Mary Anthony <mary@docker.com>
masibw <masi19bw@gmail.com>
Matias Insaurralde <matias@insaurral.de>
Matt Kang <impulsecss@gmail.com>
Matt Rickard <mrick@google.com> Matt Rickard <mrick@google.com>
Maxime Lagresle <maxime@angel.co>
Michael Crosby <crosbymichael@gmail.com> Michael Crosby <crosbymichael@gmail.com>
Michael Friis <friism@gmail.com>
Michael Irwin <mikesir87@gmail.com>
Miguel Ángel Jimeno <miguelangel4b@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com>
Mike Brown <brownwm@us.ibm.com>
mikelinjie <294893458@qq.com>
Mikhail Vasin <vasin@cloud-tv.ru>
Misty Stanley-Jones <misty@docker.com>
Miyachi Katsuya <miyachi_katsuya@r.recruit.co.jp> Miyachi Katsuya <miyachi_katsuya@r.recruit.co.jp>
Morgan Bauer <mbauer@us.ibm.com>
Morlay <morlay.null@gmail.com>
msg <msg@clinc.com>
Nao YONASHIRO <yonashiro@r.recruit.co.jp> Nao YONASHIRO <yonashiro@r.recruit.co.jp>
Natasha Jarus <linuxmercedes@gmail.com> Natasha Jarus <linuxmercedes@gmail.com>
Nathan Sullivan <nathan@nightsys.net>
Nick Miyake <nmiyake@users.noreply.github.com>
Nick Santos <nick.santos@docker.com>
Nikhil Pandeti <nikhil.pandeti@utexas.edu>
Noel Georgi <18496730+frezbo@users.noreply.github.com> Noel Georgi <18496730+frezbo@users.noreply.github.com>
Oliver Bristow <oliver.bristow@project-tracr.com>
Omer Duchovne <79370724+od-cyera@users.noreply.github.com>
Omer Mizrahi <ommizrah@microsoft.com>
Ondrej Fabry <ofabry@cisco.com> Ondrej Fabry <ofabry@cisco.com>
Otto Kekäläinen <otto@seravo.fi>
Pablo Chico de Guzman <pchico83@gmail.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick Lang <plang@microsoft.com>
Patrick Van Stee <patrick@vanstee.me> Patrick Van Stee <patrick@vanstee.me>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paweł Gronowski <pawel.gronowski@docker.com>
Peter Dave Hello <hsu@peterdavehello.org>
Petr Fedchenkov <giggsoff@gmail.com>
Phil Estes <estesp@gmail.com>
Pierre Fenoll <pierrefenoll@gmail.com>
pieterdd <pieterdd@users.noreply.github.com>
Pranav Pandit <pranavp@microsoft.com>
Pratik Raj <rajpratik71@gmail.com>
Prayag Verma <prayag.verma@gmail.com>
Qiang Huang <h.huangqiang@huawei.com>
Remy Suen <remy.suen@gmail.com>
Ri Xu <xuri.me@gmail.com> Ri Xu <xuri.me@gmail.com>
Rob Taylor <rob@shape.build>
Robert Estelle <robertestelle@gmail.com>
Rubens Figueiredo <r.figueiredo.52@gmail.com>
Sam Whited <sam@samwhited.com>
Sascha Schwarze <schwarzs@de.ibm.com>
Sean P. Kane <spkane00@gmail.com>
Sebastiaan van Stijn <github@gone.nl> Sebastiaan van Stijn <github@gone.nl>
Seiya Miyata <odradek38@gmail.com>
Serhat Gülçiçek <serhat25@gmail.com>
Sertac Ozercan <sozercan@gmail.com>
Shev Yan <yandong_8212@163.com> Shev Yan <yandong_8212@163.com>
Shijiang Wei <mountkin@gmail.com>
Shingo Omura <everpeace@gmail.com>
Shiwei Zhang <shizh@microsoft.com>
Siebe Schaap <siebe@digibites.nl>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>
Simon Ferquel <simon.ferquel@docker.com> Simon Ferquel <simon.ferquel@docker.com>
Slava Semushin <semushin@redhat.com>
Solomon Hykes <sh.github.6811@hykes.org>
squeegels <1674195+squeegels@users.noreply.github.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Weil <sw@weilnetz.de> Stefan Weil <sw@weilnetz.de>
StefanSchoof <Stefan.Schoof@direkt-gruppe.de>
Stepan Blyshchak <stepanblischak@gmail.com>
Steve Lohr <schdief.law@gmail.com>
sunchunming <sunchunming1@jd.com>
Sven Dowideit <SvenDowideit@home.org.au>
Takuya Noguchi <takninnovationresearch@gmail.com>
Thomas Leonard <thomas.leonard@docker.com> Thomas Leonard <thomas.leonard@docker.com>
Thomas Riccardi <riccardi@systran.fr>
Thomas Shaw <tomwillfixit@users.noreply.github.com> Thomas Shaw <tomwillfixit@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <tibor@docker.com> Tibor Vass <tibor@docker.com>
Tiffany Jernigan <tiffany.f.j@gmail.com> Tiffany Jernigan <tiffany.f.j@gmail.com>
Tim Waugh <twaugh@redhat.com>
Tim Wraight <tim.wraight@tangentlabs.co.uk>
Tino Rusch <tino.rusch@gmail.com> Tino Rusch <tino.rusch@gmail.com>
Tobias Klauser <tklauser@distanz.ch> Tobias Klauser <tklauser@distanz.ch>
Tomas Tomecek <ttomecek@redhat.com> Tomas Tomecek <ttomecek@redhat.com>
Tomasz Kopczynski <tomek@kopczynski.net.pl>
Tomohiro Kusumoto <zabio1192@gmail.com> Tomohiro Kusumoto <zabio1192@gmail.com>
Troels Liebe Bentsen <tlb@nversion.dk>
Tõnis Tiigi <tonistiigi@gmail.com> Tõnis Tiigi <tonistiigi@gmail.com>
Valentin Lorentz <progval+git@progval.net>
Vasek - Tom C <tom.chauveau@epitech.eu>
Victor Vieux <victorvieux@gmail.com>
Victoria Bialas <victoria.bialas@docker.com>
Vincent Demeester <vincent.demeester@docker.com> Vincent Demeester <vincent.demeester@docker.com>
Vlad A. Ionescu <vladaionescu@users.noreply.github.com>
Vladislav Ivanov <vlad@ivanov.email>
Wang Yumu <37442693@qq.com>
Wei Fu <fuweid89@gmail.com> Wei Fu <fuweid89@gmail.com>
Wei Zhang <kweizh@gmail.com>
wingkwong <wingkwong.code@gmail.com>
Xiaofan Zhang <xiaofan.zhang@clinc.com>
Ximo Guanter <ximo.guanter@gmail.com>
Yamazaki Masashi <masi19bw@gmail.com>
Yan Song <imeoer@linux.alibaba.com>
Yong Tang <yong.tang.github@outlook.com> Yong Tang <yong.tang.github@outlook.com>
Yuichiro Kaneko <spiketeika@gmail.com> Yuichiro Kaneko <spiketeika@gmail.com>
Yurii Rashkovskii <yrashk@gmail.com>
Zach Badgett <zach.badgett@gmail.com>
zhangwenlong <zhangwenlong8911@163.com>
Ziv Tsarfati <digger18@gmail.com> Ziv Tsarfati <digger18@gmail.com>
岁丰 <genglu.gl@antfin.com>
沈陵 <shenling.yyb@alibaba-inc.com>
郑泽宇 <perhapszzy@sina.com> 郑泽宇 <perhapszzy@sina.com>

View file

@ -367,21 +367,25 @@ func (m *UsageRecord) GetParents() []string {
} }
type SolveRequest struct { type SolveRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"`
Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` // ExporterDeprecated and ExporterAttrsDeprecated are deprecated in favor
ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs,proto3" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // of the new Exporters. If these fields are set, then they will be
Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` // appended to the Exporters field if Exporters was not explicitly set.
Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` ExporterDeprecated string `protobuf:"bytes,3,opt,name=ExporterDeprecated,proto3" json:"ExporterDeprecated,omitempty"`
FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` ExporterAttrsDeprecated map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrsDeprecated,proto3" json:"ExporterAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"` Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"`
Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"` Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Internal bool `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"` Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
SourcePolicy *pb1.Policy `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"` Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_unrecognized []byte `json:"-"` Internal bool `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"`
XXX_sizecache int32 `json:"-"` SourcePolicy *pb1.Policy `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
Exporters []*Exporter `protobuf:"bytes,13,rep,name=Exporters,proto3" json:"Exporters,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *SolveRequest) Reset() { *m = SolveRequest{} } func (m *SolveRequest) Reset() { *m = SolveRequest{} }
@ -431,16 +435,16 @@ func (m *SolveRequest) GetDefinition() *pb.Definition {
return nil return nil
} }
func (m *SolveRequest) GetExporter() string { func (m *SolveRequest) GetExporterDeprecated() string {
if m != nil { if m != nil {
return m.Exporter return m.ExporterDeprecated
} }
return "" return ""
} }
func (m *SolveRequest) GetExporterAttrs() map[string]string { func (m *SolveRequest) GetExporterAttrsDeprecated() map[string]string {
if m != nil { if m != nil {
return m.ExporterAttrs return m.ExporterAttrsDeprecated
} }
return nil return nil
} }
@ -494,6 +498,13 @@ func (m *SolveRequest) GetSourcePolicy() *pb1.Policy {
return nil return nil
} }
func (m *SolveRequest) GetExporters() []*Exporter {
if m != nil {
return m.Exporters
}
return nil
}
type CacheOptions struct { type CacheOptions struct {
// ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0. // ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0.
// When ExportRefDeprecated is set, the solver appends // When ExportRefDeprecated is set, the solver appends
@ -1832,11 +1843,12 @@ func (m *Descriptor) GetAnnotations() map[string]string {
} }
type BuildResultInfo struct { type BuildResultInfo struct {
Result *Descriptor `protobuf:"bytes,1,opt,name=Result,proto3" json:"Result,omitempty"` ResultDeprecated *Descriptor `protobuf:"bytes,1,opt,name=ResultDeprecated,proto3" json:"ResultDeprecated,omitempty"`
Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"` Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` Results map[int64]*Descriptor `protobuf:"bytes,3,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_unrecognized []byte `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *BuildResultInfo) Reset() { *m = BuildResultInfo{} } func (m *BuildResultInfo) Reset() { *m = BuildResultInfo{} }
@ -1872,9 +1884,9 @@ func (m *BuildResultInfo) XXX_DiscardUnknown() {
var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo
func (m *BuildResultInfo) GetResult() *Descriptor { func (m *BuildResultInfo) GetResultDeprecated() *Descriptor {
if m != nil { if m != nil {
return m.Result return m.ResultDeprecated
} }
return nil return nil
} }
@ -1886,8 +1898,18 @@ func (m *BuildResultInfo) GetAttestations() []*Descriptor {
return nil return nil
} }
func (m *BuildResultInfo) GetResults() map[int64]*Descriptor {
if m != nil {
return m.Results
}
return nil
}
// Exporter describes the output exporter
type Exporter struct { type Exporter struct {
Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` // Type identifies the exporter
Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
// Attrs specifies exporter configuration
Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -1948,7 +1970,7 @@ func init() {
proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsDeprecatedEntry")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry")
proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry") proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry")
proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
@ -1979,6 +2001,7 @@ func init() {
proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor") proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry")
proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo") proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo")
proto.RegisterMapType((map[int64]*Descriptor)(nil), "moby.buildkit.v1.BuildResultInfo.ResultsEntry")
proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter") proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry")
} }
@ -1986,149 +2009,152 @@ func init() {
func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) }
var fileDescriptor_0c5120591600887d = []byte{ var fileDescriptor_0c5120591600887d = []byte{
// 2261 bytes of a gzipped FileDescriptorProto // 2315 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6e, 0x1b, 0xc9, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0x4f, 0x73, 0x1b, 0x49,
0x11, 0xde, 0x21, 0x25, 0xfe, 0x14, 0x29, 0x59, 0x6a, 0x7b, 0x8d, 0xc9, 0xc4, 0x2b, 0xc9, 0xb3, 0x15, 0xcf, 0x48, 0xb2, 0x2c, 0x3d, 0xc9, 0x8e, 0xdc, 0xc9, 0x86, 0x61, 0xc8, 0xda, 0xce, 0x6c,
0x76, 0x22, 0x38, 0xf6, 0x50, 0xcb, 0xac, 0x63, 0xaf, 0x9c, 0x38, 0x16, 0x45, 0x66, 0x2d, 0xc7, 0x02, 0xae, 0x90, 0x8c, 0xbc, 0x82, 0x90, 0xac, 0x03, 0x21, 0xb6, 0x25, 0x36, 0x0e, 0x49, 0xc5,
0x82, 0xb5, 0x2d, 0x79, 0x0d, 0x2c, 0xe0, 0x04, 0x23, 0xb2, 0x45, 0x0f, 0x34, 0x9c, 0x99, 0x74, 0xdb, 0x76, 0x36, 0xd4, 0x56, 0x05, 0x6a, 0x2c, 0xb5, 0x95, 0x29, 0x8f, 0x66, 0x86, 0xee, 0x96,
0x37, 0xb5, 0xe6, 0x3e, 0x40, 0x80, 0xcd, 0x21, 0xc8, 0x25, 0xc8, 0x25, 0xf7, 0x9c, 0x72, 0xce, 0x37, 0xde, 0x13, 0x27, 0xaa, 0xb8, 0x50, 0x5c, 0x28, 0x2e, 0xdc, 0x39, 0x71, 0xe6, 0xcc, 0x81,
0x13, 0x04, 0xf0, 0x31, 0xe7, 0x3d, 0x38, 0x81, 0x1f, 0x20, 0xc8, 0x31, 0xb9, 0x05, 0xfd, 0x33, 0xaa, 0x1c, 0x39, 0xef, 0x21, 0x50, 0xf9, 0x00, 0x14, 0x47, 0xb8, 0x6d, 0xf5, 0x9f, 0x91, 0x46,
0xe4, 0x90, 0x33, 0x94, 0x28, 0xdb, 0x27, 0x76, 0x75, 0xd7, 0x57, 0x53, 0x55, 0x5d, 0x5d, 0x5d, 0x9a, 0x91, 0x2d, 0x25, 0x39, 0xa9, 0x5f, 0xf7, 0xfb, 0xbd, 0x79, 0xef, 0xf5, 0xeb, 0xd7, 0xef,
0xd5, 0x84, 0x85, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0xa5, 0x5e, 0x78, 0xb5, 0x60, 0xa1, 0x1d, 0x06, 0x9c, 0x86, 0xbe, 0x13, 0xd1, 0x90, 0x87, 0xa8, 0xd6, 0x0b, 0x0f,
0x38, 0x70, 0x0e, 0xfb, 0x9e, 0xdf, 0x39, 0xf6, 0xb8, 0x73, 0xf2, 0x89, 0x75, 0xab, 0xeb, 0xf1, 0x4e, 0x9c, 0x83, 0xbe, 0xe7, 0x77, 0x8e, 0x3c, 0xee, 0x1c, 0x7f, 0x6c, 0x35, 0xba, 0x1e, 0x7f,
0x17, 0xfd, 0x43, 0xa7, 0x1d, 0xf6, 0x6a, 0xdd, 0xb0, 0x1b, 0xd6, 0x24, 0xe3, 0x61, 0xff, 0x48, 0xd1, 0x3f, 0x70, 0xda, 0x61, 0xaf, 0xde, 0x0d, 0xbb, 0x61, 0xbd, 0x1b, 0x86, 0x5d, 0x9f, 0xb8,
0x52, 0x92, 0x90, 0x23, 0x25, 0xc0, 0x5a, 0xed, 0x86, 0x61, 0xd7, 0x27, 0x23, 0x2e, 0xee, 0xf5, 0x91, 0xc7, 0xf4, 0xb0, 0x4e, 0xa3, 0x76, 0x9d, 0x71, 0x97, 0xf7, 0x99, 0x92, 0x62, 0xdd, 0x1c,
0x08, 0xe3, 0x6e, 0x2f, 0xd2, 0x0c, 0x37, 0x13, 0xf2, 0xc4, 0xc7, 0x6a, 0xf1, 0xc7, 0x6a, 0x2c, 0xc7, 0xc8, 0xe9, 0x83, 0xfe, 0xa1, 0xa4, 0x24, 0x21, 0x47, 0x9a, 0xbd, 0x9e, 0x60, 0x17, 0xdf,
0xf4, 0x4f, 0x08, 0xad, 0x45, 0x87, 0xb5, 0x30, 0x62, 0x9a, 0xbb, 0x36, 0x95, 0xdb, 0x8d, 0xbc, 0xaf, 0xc7, 0xdf, 0xaf, 0xbb, 0x91, 0x57, 0xe7, 0x27, 0x11, 0x61, 0xf5, 0x2f, 0x43, 0x7a, 0x44,
0x1a, 0x1f, 0x44, 0x84, 0xd5, 0xbe, 0x0e, 0xe9, 0x31, 0xa1, 0x1a, 0x50, 0x9f, 0x54, 0x57, 0xe9, 0xa8, 0x06, 0xdc, 0x98, 0x08, 0x60, 0xa1, 0x7f, 0x4c, 0x68, 0x3d, 0x3a, 0xa8, 0x87, 0x51, 0xac,
0xe3, 0x46, 0x1e, 0xd3, 0xc3, 0x1a, 0x8d, 0xda, 0x35, 0xc6, 0x5d, 0xde, 0x8f, 0x3f, 0x72, 0xfb, 0xcd, 0xad, 0x53, 0xb8, 0xfb, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x27, 0x02, 0xa3, 0x46, 0x1a,
0x14, 0x95, 0xfa, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x03, 0xa1, 0x98, 0x1a, 0x29, 0x98, 0xfd, 0xb6, 0xa2, 0xad, 0x1b, 0xe8, 0xce, 0xbd, 0x1e, 0x61, 0xdc, 0xed, 0x45, 0x8a, 0xc1, 0xfe, 0xad,
0x5b, 0x03, 0xaa, 0x7b, 0xb4, 0x1f, 0x10, 0x4c, 0x7e, 0xd3, 0x27, 0x8c, 0xa3, 0xcb, 0x50, 0x38, 0x01, 0xd5, 0x5d, 0xda, 0x0f, 0x08, 0x26, 0xbf, 0xee, 0x13, 0xc6, 0xd1, 0x25, 0x28, 0x1e, 0x7a,
0xf2, 0x7c, 0x4e, 0xa8, 0x69, 0xac, 0xe5, 0xd7, 0xcb, 0x58, 0x53, 0x68, 0x09, 0xf2, 0xae, 0xef, 0x3e, 0x27, 0xd4, 0x34, 0x56, 0xf3, 0x6b, 0x65, 0xac, 0x29, 0x54, 0x83, 0xbc, 0xeb, 0xfb, 0x66,
0x9b, 0xb9, 0x35, 0x63, 0xbd, 0x84, 0xc5, 0x10, 0xad, 0x43, 0xf5, 0x98, 0x90, 0xa8, 0xd9, 0xa7, 0x6e, 0xd5, 0x58, 0x2b, 0x61, 0x31, 0x44, 0x6b, 0x50, 0x3d, 0x22, 0x24, 0x6a, 0xf6, 0xa9, 0xcb,
0x2e, 0xf7, 0xc2, 0xc0, 0xcc, 0xaf, 0x19, 0xeb, 0xf9, 0xc6, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e, 0xbd, 0x30, 0x30, 0xf3, 0xab, 0xc6, 0x5a, 0x7e, 0xab, 0xf0, 0xea, 0xf5, 0x8a, 0x81, 0x47, 0x56,
0x5b, 0x41, 0x36, 0x94, 0x05, 0xdd, 0x18, 0x70, 0xc2, 0xcc, 0xb9, 0x04, 0xdb, 0x68, 0xda, 0xbe, 0x90, 0x0d, 0x65, 0x41, 0x6f, 0x9d, 0x70, 0xc2, 0xcc, 0x42, 0x82, 0x6d, 0x38, 0x6d, 0x5f, 0x87,
0x01, 0x4b, 0x4d, 0x8f, 0x1d, 0x3f, 0x65, 0x6e, 0xf7, 0x2c, 0x5d, 0xec, 0x47, 0xb0, 0x9c, 0xe0, 0x5a, 0xd3, 0x63, 0x47, 0x4f, 0x99, 0xdb, 0x3d, 0x4b, 0x17, 0xfb, 0x21, 0x2c, 0x25, 0x78, 0x59,
0x65, 0x51, 0x18, 0x30, 0x82, 0x6e, 0x43, 0x81, 0x92, 0x76, 0x48, 0x3b, 0x92, 0xb9, 0x52, 0xff, 0x14, 0x06, 0x8c, 0xa0, 0x5b, 0x50, 0xa4, 0xa4, 0x1d, 0xd2, 0x8e, 0x64, 0xae, 0x34, 0x3e, 0x74,
0xc8, 0x99, 0x0c, 0x03, 0x47, 0x03, 0x04, 0x13, 0xd6, 0xcc, 0xf6, 0x9f, 0xf2, 0x50, 0x49, 0xcc, 0xc6, 0xc3, 0xc0, 0xd1, 0x00, 0xc1, 0x84, 0x35, 0xb3, 0xfd, 0xa7, 0x3c, 0x54, 0x12, 0xf3, 0x68,
0xa3, 0x45, 0xc8, 0xed, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe3, 0xdc, 0x4e, 0x13, 0x99, 0x50, 0x11, 0x72, 0x3b, 0x4d, 0xd3, 0x58, 0x35, 0xd6, 0xca, 0x38, 0xb7, 0xd3, 0x44, 0x26, 0xcc, 0x3f,
0xdc, 0xed, 0x73, 0xf7, 0xd0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x97, 0x60, 0x7e, 0x27, 0x78, 0xca, 0xee, 0x73, 0xf7, 0xc0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x17, 0x61, 0x6e, 0x27, 0x78, 0xca, 0x88,
0x88, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0xcc, 0xed, 0x7b, 0xdf, 0x10, 0x65, 0x26, 0x96, 0x63, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0x14, 0xf6, 0xbc, 0xaf, 0x88, 0x32, 0x13, 0xcb, 0x31, 0xb2,
0x64, 0x41, 0x61, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0xbc, 0x90, 0xdb, 0xc8, 0x99, 0x06, 0xd6, 0x33, 0xa0, 0xb8, 0xeb, 0x52, 0x12, 0x70, 0x73, 0x4e, 0xc8, 0xdd, 0xca, 0x99, 0x06, 0xd6, 0x33, 0x68,
0xa8, 0x01, 0xe5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0x2c, 0xac, 0x19, 0xeb, 0x95, 0xba, 0x0b, 0xca, 0xdb, 0x94, 0xb8, 0x9c, 0x74, 0x36, 0xb9, 0x59, 0x5c, 0x35, 0xd6, 0x2a, 0x0d, 0xcb,
0xe5, 0xa8, 0x4d, 0x76, 0xe2, 0xf8, 0x73, 0x0e, 0xe2, 0xf8, 0x6b, 0x94, 0x5e, 0xbd, 0x5e, 0xfd, 0x51, 0xbb, 0xe6, 0xc4, 0xbb, 0xe6, 0xec, 0xc7, 0xbb, 0xb6, 0x55, 0x7a, 0xf5, 0x7a, 0xe5, 0xdc,
0xe0, 0x0f, 0xff, 0x14, 0xbe, 0x1b, 0xc2, 0xd0, 0x03, 0x80, 0xc7, 0x2e, 0xe3, 0x4f, 0x99, 0x14, 0x1f, 0xfe, 0x25, 0x7c, 0x37, 0x80, 0xa1, 0xfb, 0x00, 0x8f, 0x5c, 0xc6, 0x9f, 0x32, 0x29, 0x64,
0x52, 0x3c, 0x53, 0xc8, 0x9c, 0x14, 0x90, 0xc0, 0xa0, 0x15, 0x00, 0xe9, 0x84, 0xed, 0xb0, 0x1f, 0xfe, 0x4c, 0x21, 0x05, 0x29, 0x20, 0x81, 0x41, 0xcb, 0x00, 0xd2, 0x09, 0xdb, 0x61, 0x3f, 0xe0,
0x70, 0xb3, 0x24, 0x75, 0x4f, 0xcc, 0xa0, 0x35, 0xa8, 0x34, 0x09, 0x6b, 0x53, 0x2f, 0x92, 0x5b, 0x66, 0x49, 0xea, 0x9e, 0x98, 0x41, 0xab, 0x50, 0x69, 0x12, 0xd6, 0xa6, 0x5e, 0x24, 0xb7, 0xba,
0x5d, 0x96, 0xee, 0x49, 0x4e, 0x09, 0x09, 0xca, 0x83, 0x07, 0x83, 0x88, 0x98, 0x20, 0x19, 0x12, 0x2c, 0xdd, 0x93, 0x9c, 0x12, 0x12, 0x94, 0x07, 0xf7, 0x4f, 0x22, 0x62, 0x82, 0x64, 0x48, 0xcc,
0x33, 0x62, 0x2f, 0xf7, 0x5f, 0xb8, 0x94, 0x74, 0xcc, 0x8a, 0x74, 0x97, 0xa6, 0x84, 0x7f, 0x95, 0x88, 0xbd, 0xdc, 0x7b, 0xe1, 0x52, 0xd2, 0x31, 0x2b, 0xd2, 0x5d, 0x9a, 0x12, 0xfe, 0x55, 0x9e,
0x27, 0x98, 0x59, 0x95, 0x9b, 0x1c, 0x93, 0xf6, 0xef, 0x8a, 0x50, 0xdd, 0x17, 0xc7, 0x29, 0x0e, 0x60, 0x66, 0x55, 0x6e, 0x72, 0x4c, 0xda, 0xbf, 0x29, 0x41, 0x75, 0x4f, 0x1c, 0x85, 0x38, 0x1c,
0x87, 0x25, 0xc8, 0x63, 0x72, 0xa4, 0xf7, 0x46, 0x0c, 0x91, 0x03, 0xd0, 0x24, 0x47, 0x5e, 0xe0, 0x6a, 0x90, 0xc7, 0xe4, 0x50, 0xef, 0x8d, 0x18, 0x22, 0x07, 0xa0, 0x49, 0x0e, 0xbd, 0xc0, 0x93,
0x49, 0xad, 0x72, 0xd2, 0xf0, 0x45, 0x27, 0x3a, 0x74, 0x46, 0xb3, 0x38, 0xc1, 0x81, 0x2c, 0x28, 0x5a, 0xe5, 0xa4, 0xe1, 0x8b, 0x4e, 0x74, 0xe0, 0x0c, 0x67, 0x71, 0x82, 0x03, 0x39, 0x80, 0x5a,
0xb5, 0x5e, 0x46, 0x21, 0x15, 0x21, 0x95, 0x97, 0x62, 0x86, 0x34, 0x7a, 0x06, 0x0b, 0xf1, 0x78, 0x2f, 0xa3, 0x90, 0x72, 0x42, 0x9b, 0x24, 0xa2, 0xa4, 0x2d, 0x1c, 0x28, 0xf7, 0xaf, 0x8c, 0x33,
0x8b, 0x73, 0x2a, 0x02, 0x55, 0x84, 0xd1, 0x27, 0xe9, 0x30, 0x4a, 0x2a, 0xe5, 0x8c, 0x61, 0x5a, 0x56, 0x50, 0x1f, 0xbe, 0x15, 0xcf, 0x6e, 0x72, 0x4e, 0x59, 0x02, 0x54, 0x90, 0x41, 0x76, 0x37,
0x01, 0xa7, 0x03, 0x3c, 0x2e, 0x47, 0x58, 0xb8, 0x4f, 0x18, 0x13, 0x1a, 0xca, 0xed, 0xc7, 0x31, 0x1d, 0x64, 0x49, 0x95, 0x9d, 0x09, 0xe8, 0x56, 0xc0, 0xe9, 0x09, 0x9e, 0x24, 0x5b, 0xf8, 0x64,
0x29, 0xd4, 0xf9, 0x05, 0x0d, 0x03, 0x4e, 0x82, 0x8e, 0xdc, 0xfa, 0x32, 0x1e, 0xd2, 0x42, 0x9d, 0x8f, 0x30, 0x26, 0x6c, 0x92, 0x01, 0x83, 0x63, 0x12, 0x59, 0x50, 0xfa, 0x19, 0x0d, 0x03, 0x4e,
0x78, 0xac, 0xd4, 0x29, 0xce, 0xa4, 0xce, 0x18, 0x46, 0xab, 0x33, 0x36, 0x87, 0x36, 0x61, 0x7e, 0x82, 0x8e, 0x0c, 0x96, 0x32, 0x1e, 0xd0, 0xe8, 0x19, 0x2c, 0xc4, 0x63, 0x29, 0xd0, 0x9c, 0x97,
0xdb, 0x6d, 0xbf, 0x20, 0x72, 0x97, 0x2b, 0xf5, 0x95, 0xb4, 0x40, 0xb9, 0xfc, 0x44, 0x6e, 0x2b, 0x2a, 0x7e, 0x7c, 0x86, 0x8a, 0x23, 0x18, 0xa5, 0xd8, 0xa8, 0x1c, 0xb4, 0x01, 0x73, 0xdb, 0x6e,
0x93, 0x07, 0xf5, 0x03, 0xac, 0x20, 0xe8, 0x57, 0x50, 0x6d, 0x05, 0xdc, 0xe3, 0x3e, 0xe9, 0xc9, 0xfb, 0x05, 0x91, 0x71, 0x51, 0x69, 0x2c, 0xa7, 0x05, 0xca, 0xe5, 0x27, 0x32, 0x10, 0x98, 0x3c,
0x1d, 0x2b, 0x8b, 0x1d, 0x6b, 0x6c, 0x7e, 0xf7, 0x7a, 0xf5, 0x27, 0x53, 0xd3, 0x4f, 0x9f, 0x7b, 0xda, 0xe7, 0xb0, 0x82, 0xa0, 0x5f, 0x42, 0xb5, 0x15, 0x70, 0x8f, 0xfb, 0xa4, 0x27, 0xf7, 0xb8,
0x7e, 0x8d, 0x24, 0x50, 0x4e, 0x42, 0x04, 0x1e, 0x93, 0x87, 0xbe, 0x82, 0xc5, 0x58, 0xd9, 0x9d, 0x2c, 0xf6, 0x78, 0x6b, 0xe3, 0xeb, 0xd7, 0x2b, 0x3f, 0x9a, 0x98, 0xd1, 0xfa, 0xdc, 0xf3, 0xeb,
0x20, 0xea, 0x73, 0x66, 0x82, 0xb4, 0xba, 0x3e, 0xa3, 0xd5, 0x0a, 0xa4, 0xcc, 0x9e, 0x90, 0x24, 0x24, 0x81, 0x72, 0x12, 0x22, 0xf0, 0x88, 0x3c, 0xf4, 0x05, 0x2c, 0xc6, 0xca, 0xee, 0x04, 0x51,
0x9c, 0xbd, 0x13, 0x70, 0x42, 0x03, 0xd7, 0xd7, 0x21, 0x38, 0xa4, 0xd1, 0x8e, 0x88, 0x34, 0x91, 0x9f, 0x33, 0x13, 0xa4, 0xd5, 0x8d, 0x29, 0xad, 0x56, 0x20, 0x65, 0xf6, 0x98, 0x24, 0xe1, 0xec,
0x25, 0xf7, 0x64, 0x6e, 0x34, 0xab, 0xd2, 0x35, 0xd7, 0xd3, 0x5f, 0x4d, 0xe6, 0x52, 0x47, 0x31, 0x9d, 0x80, 0x13, 0x1a, 0xb8, 0xbe, 0x0e, 0xda, 0x01, 0x8d, 0x76, 0x44, 0x6c, 0x8a, 0xc4, 0xbb,
0xe3, 0x31, 0xa8, 0xf5, 0x00, 0x50, 0x3a, 0x24, 0x44, 0xe8, 0x1e, 0x93, 0x41, 0x1c, 0xba, 0xc7, 0x2b, 0xd3, 0xad, 0x59, 0x95, 0xae, 0xb9, 0x96, 0xfe, 0x6a, 0x32, 0x3d, 0x3b, 0x8a, 0x19, 0x8f,
0x64, 0x20, 0xb2, 0xc7, 0x89, 0xeb, 0xf7, 0x55, 0x56, 0x29, 0x63, 0x45, 0x6c, 0xe6, 0xee, 0x1a, 0x40, 0xd1, 0x1d, 0x28, 0xc7, 0x81, 0xc0, 0xcc, 0x05, 0xa9, 0xbd, 0x95, 0x96, 0x13, 0xb3, 0xe0,
0x42, 0x42, 0x7a, 0x17, 0xcf, 0x25, 0xe1, 0x0b, 0xb8, 0x98, 0xe1, 0x91, 0x0c, 0x11, 0xd7, 0x92, 0x21, 0xb3, 0xf5, 0x10, 0x2e, 0x9f, 0x16, 0x60, 0xe2, 0xc0, 0x1c, 0x91, 0x93, 0xf8, 0xc0, 0x1c,
0x22, 0xd2, 0x47, 0x67, 0x24, 0xd2, 0xfe, 0x6b, 0x1e, 0xaa, 0xc9, 0xb8, 0x40, 0x1b, 0x70, 0x51, 0x91, 0x13, 0x91, 0xb3, 0x8e, 0x5d, 0xbf, 0xaf, 0x72, 0x59, 0x19, 0x2b, 0x62, 0x23, 0x77, 0xc7,
0xd9, 0x89, 0xc9, 0x51, 0x93, 0x44, 0x94, 0xb4, 0x45, 0x32, 0xd2, 0xc2, 0xb3, 0x96, 0x50, 0x1d, 0xb0, 0xee, 0x03, 0x4a, 0x47, 0xc2, 0x4c, 0x12, 0x3e, 0x83, 0x0b, 0x19, 0x5e, 0xcd, 0x10, 0x71,
0x2e, 0xed, 0xf4, 0xf4, 0x34, 0x4b, 0x40, 0x72, 0xf2, 0xd8, 0x67, 0xae, 0xa1, 0x10, 0x3e, 0x54, 0x35, 0x29, 0x22, 0x7d, 0x60, 0x87, 0x22, 0xed, 0xbf, 0xe6, 0xa1, 0x9a, 0x8c, 0x2d, 0xb4, 0x0e,
0xa2, 0xa4, 0x27, 0x12, 0xa0, 0xbc, 0x8c, 0x8b, 0xcf, 0x4e, 0x0f, 0x5e, 0x27, 0x13, 0xab, 0xc2, 0x17, 0x94, 0xc5, 0x98, 0x1c, 0x26, 0x0e, 0xa3, 0x12, 0x9e, 0xb5, 0x84, 0x1a, 0x70, 0x71, 0xa7,
0x23, 0x5b, 0x2e, 0xfa, 0x19, 0x14, 0xd5, 0x42, 0x7c, 0xfe, 0x3f, 0x3e, 0xfd, 0x13, 0x4a, 0x58, 0xa7, 0xa7, 0x93, 0xe7, 0x37, 0x27, 0x93, 0x4d, 0xe6, 0x1a, 0x0a, 0xe1, 0x03, 0x25, 0x6a, 0xfc,
0x8c, 0x11, 0x70, 0x65, 0x07, 0x33, 0xe7, 0xcf, 0x01, 0xd7, 0x18, 0xeb, 0x21, 0x58, 0xd3, 0x55, 0xd0, 0xe7, 0xe5, 0xee, 0x7c, 0x72, 0xfa, 0x01, 0x70, 0x32, 0xb1, 0x2a, 0xc4, 0xb2, 0xe5, 0xa2,
0x3e, 0x4f, 0x08, 0xd8, 0x7f, 0x31, 0x60, 0x39, 0xf5, 0x21, 0x71, 0x39, 0xc9, 0xf4, 0xac, 0x44, 0x9f, 0xc0, 0xbc, 0x5a, 0x60, 0x3a, 0xaf, 0x7c, 0x74, 0xfa, 0x27, 0x94, 0xb0, 0x18, 0x23, 0xe0,
0xc8, 0x31, 0x6a, 0xc2, 0xbc, 0x4a, 0x30, 0x39, 0xa9, 0xb0, 0x33, 0x83, 0xc2, 0x4e, 0x22, 0xbb, 0xca, 0x0e, 0x66, 0xce, 0xcd, 0x00, 0xd7, 0x18, 0xeb, 0x01, 0x58, 0x93, 0x55, 0x9e, 0x25, 0x04,
0x28, 0xb0, 0x75, 0x17, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0xfa, 0x30, 0xeb, 0x9b, 0xec, 0xbf, 0x18, 0xb0, 0x94, 0xfa, 0x90, 0xb8, 0x12, 0xe5, 0xa5, 0xa0, 0x44, 0xc8, 0x31, 0x6a,
0xdc, 0x85, 0xa5, 0xf8, 0x08, 0xc5, 0x73, 0xfa, 0x4e, 0xbf, 0x3d, 0x35, 0x0f, 0x28, 0x36, 0x67, 0xc2, 0x9c, 0x4a, 0x52, 0x39, 0xa9, 0xb0, 0x33, 0x85, 0xc2, 0x4e, 0x22, 0x43, 0x29, 0xb0, 0x75,
0x12, 0xa7, 0x74, 0x4c, 0x89, 0xb3, 0xb6, 0xe3, 0xb8, 0x9a, 0x60, 0x3d, 0x97, 0xe6, 0x57, 0x61, 0x07, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0x3a, 0x21, 0xe8, 0xfa, 0xc1, 0x85, 0xda,
0x61, 0x5f, 0x96, 0x60, 0x53, 0x2f, 0x28, 0xfb, 0x3f, 0x06, 0x2c, 0xc6, 0x3c, 0xda, 0xba, 0x4f, 0xe0, 0x8c, 0xe9, 0x39, 0x5d, 0x49, 0xdc, 0x9a, 0x98, 0x4b, 0x14, 0x9b, 0x33, 0x8e, 0x53, 0x3a,
0xa1, 0x74, 0x42, 0x28, 0x27, 0x2f, 0x09, 0xd3, 0x56, 0x99, 0x69, 0xab, 0xbe, 0x94, 0x1c, 0x78, 0xa6, 0xc4, 0x59, 0xdb, 0x71, 0x5c, 0x8d, 0xb1, 0xce, 0xa4, 0xf9, 0x15, 0x58, 0xd8, 0x93, 0x75,
0xc8, 0x89, 0x36, 0xa1, 0xa4, 0xca, 0x3d, 0x12, 0x6f, 0xd4, 0xca, 0x34, 0x94, 0xfe, 0xde, 0x90, 0xea, 0xc4, 0x6b, 0xd1, 0xfe, 0xaf, 0x01, 0x8b, 0x31, 0x8f, 0xb6, 0xee, 0x87, 0x50, 0x3a, 0x26,
0x1f, 0xd5, 0x60, 0xce, 0x0f, 0xbb, 0x4c, 0x9f, 0x99, 0xef, 0x4f, 0xc3, 0x3d, 0x0e, 0xbb, 0x58, 0x94, 0x93, 0x97, 0x84, 0x69, 0xab, 0xcc, 0xb4, 0x55, 0x9f, 0x4b, 0x0e, 0x3c, 0xe0, 0x44, 0x1b,
0x32, 0xa2, 0x7b, 0x50, 0xfa, 0xda, 0xa5, 0x81, 0x17, 0x74, 0xe3, 0x53, 0xb0, 0x3a, 0x0d, 0xf4, 0x50, 0x52, 0x35, 0x31, 0x89, 0x37, 0x6a, 0x79, 0x12, 0x4a, 0x7f, 0x6f, 0xc0, 0x8f, 0xea, 0x50,
0x4c, 0xf1, 0xe1, 0x21, 0x40, 0x14, 0x54, 0x05, 0xb5, 0x86, 0x1e, 0x41, 0xa1, 0xe3, 0x75, 0x09, 0xf0, 0xc3, 0x2e, 0xd3, 0x67, 0xe6, 0x3b, 0x93, 0x70, 0x8f, 0xc2, 0x2e, 0x96, 0x8c, 0xe8, 0x2e,
0xe3, 0xca, 0x25, 0x8d, 0xba, 0xb8, 0x4b, 0xbe, 0x7b, 0xbd, 0x7a, 0x23, 0x71, 0x59, 0x84, 0x11, 0x94, 0xbe, 0x74, 0x69, 0xe0, 0x05, 0xdd, 0xf8, 0x14, 0xac, 0x4c, 0x02, 0x3d, 0x53, 0x7c, 0x78,
0x09, 0x44, 0xf9, 0xee, 0x7a, 0x01, 0xa1, 0xa2, 0xbc, 0xbd, 0xa5, 0x20, 0x4e, 0x53, 0xfe, 0x60, 0x00, 0x10, 0x65, 0x5c, 0x51, 0xad, 0xa1, 0x87, 0x50, 0xec, 0x78, 0x5d, 0xc2, 0xb8, 0x72, 0xc9,
0x2d, 0x41, 0xc8, 0xf2, 0xd4, 0x95, 0x20, 0xf3, 0xc5, 0xdb, 0xc9, 0x52, 0x12, 0xc4, 0x31, 0x08, 0x56, 0x43, 0xdc, 0x47, 0x5f, 0xbf, 0x5e, 0xb9, 0x9e, 0xb8, 0x70, 0xc2, 0x88, 0x04, 0xa2, 0x69,
0xdc, 0x1e, 0xd1, 0x25, 0x80, 0x1c, 0x8b, 0xfa, 0xa4, 0x2d, 0xe2, 0xbc, 0x23, 0x2b, 0xb7, 0x12, 0x70, 0xbd, 0x80, 0x50, 0xd1, 0x03, 0xdc, 0x54, 0x10, 0xa7, 0x29, 0x7f, 0xb0, 0x96, 0x20, 0x64,
0xd6, 0x14, 0xda, 0x84, 0x22, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x7e, 0xc6, 0xc2, 0x2a, 0x06, 0xa0, 0x79, 0xea, 0x5a, 0x91, 0xf9, 0xe2, 0xed, 0x64, 0x29, 0x09, 0xe2, 0x18, 0x04, 0x6e, 0x8f, 0xe8,
0xfb, 0x50, 0x6e, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x98, 0x11, 0x3d, 0x82, 0x88, 0xd0, 0x23, 0x72, 0x43, 0x8e, 0x45, 0x55, 0xd4, 0x16, 0x71, 0xde, 0x91, 0xf5, 0x62, 0x09, 0x6b, 0x0a, 0x6d,
0x94, 0x86, 0x54, 0x96, 0x74, 0x65, 0xac, 0x08, 0x74, 0x07, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61, 0xc0, 0x3c, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x6e, 0xca, 0x72, 0x2e, 0x06, 0xa0, 0x7b, 0x50, 0x6e,
0xec, 0x73, 0x1a, 0xf6, 0x23, 0x7d, 0x91, 0x2f, 0x8b, 0xe4, 0xbd, 0x97, 0x5c, 0xc0, 0xe3, 0x7c, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x9c, 0x12, 0x3d, 0x84, 0x88, 0xd0, 0x23, 0x94, 0x86, 0x54,
0xf6, 0xbf, 0x73, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xd6, 0x7d, 0x04, 0x05, 0x15, 0x70, 0x2a, 0xd6, 0x16, 0x92, 0x65, 0xac, 0x08, 0x74, 0x1b, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61, 0xec, 0x53, 0x1a,
0xdf, 0xce, 0xc7, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x28, 0xb6, 0xfb, 0x54, 0x16, 0xc2, 0xaa, 0x3c, 0xf6, 0x23, 0x5d, 0x0c, 0x2c, 0x89, 0xe4, 0xbd, 0x9b, 0x5c, 0xc0, 0xa3, 0x7c, 0xf6, 0x7f, 0x72,
0x8e, 0x49, 0x61, 0x29, 0x0f, 0xb9, 0xeb, 0x4b, 0x1f, 0xe7, 0xb1, 0x22, 0x44, 0x6d, 0x3c, 0xec, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xc2, 0x7e, 0x08, 0x45, 0x15, 0x70, 0x2a, 0xd6, 0xdf, 0xce, 0xc7,
0xbc, 0xce, 0x57, 0x1b, 0x0f, 0x61, 0xc9, 0xfd, 0x2b, 0xbe, 0xd3, 0xfe, 0x95, 0xce, 0xbd, 0x7f, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x98, 0x6f, 0xf7, 0xa9, 0x2c, 0xbf, 0x55, 0x51, 0x1e, 0x93, 0xc2,
0xf6, 0xdf, 0x0d, 0x28, 0x0f, 0xcf, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xcc, 0x33, 0xb9, 0x52, 0x1e, 0x72, 0xd7, 0x97, 0x3e, 0xce, 0x63, 0x45, 0x88, 0x8a, 0x7c, 0xd0, 0x25, 0xcd, 0x56,
0xb7, 0xf3, 0xcc, 0x65, 0x28, 0x30, 0x4e, 0x89, 0xdb, 0x53, 0x9d, 0x1b, 0xd6, 0x94, 0xc8, 0x62, 0x91, 0x0f, 0x60, 0xc9, 0xfd, 0x9b, 0x7f, 0xa7, 0xfd, 0x2b, 0xcd, 0xbc, 0x7f, 0xf6, 0x3f, 0x0c,
0x3d, 0xd6, 0x95, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xd7, 0x80, 0x85, 0xb1, 0xe3, 0xfe, 0x5e, 0x28, 0x0f, 0xce, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xc4, 0x33, 0xb9, 0xb7, 0xf3, 0xcc,
0x6d, 0xb9, 0x04, 0xf3, 0x3e, 0x39, 0x21, 0xaa, 0xb7, 0xcc, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22, 0x25, 0x28, 0x32, 0x4e, 0x89, 0xdb, 0x53, 0xfd, 0x22, 0xd6, 0x94, 0xc8, 0x62, 0x3d, 0xd6, 0x95,
0xa4, 0x5c, 0x2a, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xcf, 0x80, 0x85, 0x91, 0xe3, 0xfe, 0x5e, 0x6d, 0xb9, 0x08,
0xd6, 0x94, 0xd0, 0xb9, 0x4f, 0x7d, 0x5d, 0x5f, 0x8b, 0x21, 0xb2, 0x61, 0xce, 0x0b, 0x8e, 0x42, 0x73, 0x3e, 0x39, 0x26, 0xaa, 0xa3, 0xcd, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22, 0xa4, 0x5c, 0x2a,
0x1d, 0x36, 0xb2, 0xb2, 0x51, 0x75, 0xda, 0x4e, 0x70, 0x14, 0x62, 0xb9, 0x86, 0xae, 0x42, 0x81, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a, 0xd6, 0x94, 0xd0,
0xba, 0x41, 0x97, 0xc4, 0xc5, 0x75, 0x59, 0x70, 0x61, 0x31, 0x83, 0xf5, 0x82, 0x6d, 0x43, 0x55, 0xb9, 0x4f, 0x7d, 0x5d, 0xa3, 0x8b, 0x21, 0xb2, 0xa1, 0xe0, 0x05, 0x87, 0xa1, 0x0e, 0x1b, 0x59,
0xf6, 0xa7, 0xbb, 0x84, 0x89, 0x6e, 0x48, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5, 0xd9, 0xa8, 0x5a, 0x6f, 0x27, 0x38, 0x0c, 0xb1, 0x5c, 0x43, 0x57, 0xa0, 0x48, 0xdd, 0xa0, 0x4b,
0xd8, 0xbe, 0x09, 0xe8, 0xb1, 0xc7, 0xf8, 0x33, 0xd9, 0xc2, 0xb3, 0xb3, 0x9a, 0xd7, 0x7d, 0xb8, 0xe2, 0x02, 0xbd, 0x2c, 0xb8, 0xb0, 0x98, 0xc1, 0x7a, 0xc1, 0xb6, 0xa1, 0x2a, 0xbb, 0xe2, 0xc7,
0x38, 0xc6, 0xad, 0xaf, 0x85, 0x9f, 0x4e, 0xb4, 0xaf, 0xd7, 0xd2, 0x19, 0x57, 0xbe, 0x14, 0x38, 0x84, 0x89, 0x1e, 0x4c, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5, 0xd8, 0xbe, 0x01,
0x0a, 0x38, 0xd1, 0xc5, 0x2e, 0x40, 0x45, 0xda, 0xa5, 0xbe, 0x6d, 0xbb, 0x50, 0x55, 0xa4, 0x16, 0xe8, 0x91, 0xc7, 0xf8, 0x33, 0xf9, 0xa6, 0xc0, 0xce, 0x6a, 0x99, 0xf7, 0xe0, 0xc2, 0x08, 0xb7,
0xfe, 0x05, 0x5c, 0x88, 0x05, 0x7d, 0x49, 0xa8, 0x6c, 0x45, 0x0c, 0xe9, 0x97, 0x1f, 0x4e, 0xfb, 0xbe, 0x16, 0x7e, 0x3c, 0xd6, 0x34, 0x5f, 0x4d, 0x67, 0x5c, 0xf9, 0x74, 0xe1, 0x28, 0xe0, 0x58,
0x4a, 0x63, 0x9c, 0x1d, 0x4f, 0xe2, 0x6d, 0x02, 0x17, 0x25, 0xcf, 0x43, 0x8f, 0xf1, 0x90, 0x0e, 0xef, 0xbc, 0x00, 0x15, 0x69, 0x97, 0xfa, 0xb6, 0xed, 0x42, 0x55, 0x91, 0x5a, 0xf8, 0x67, 0x70,
0x62, 0xab, 0x57, 0x00, 0xb6, 0xda, 0xdc, 0x3b, 0x21, 0x4f, 0x02, 0x5f, 0x5d, 0xa3, 0x25, 0x9c, 0x3e, 0x16, 0xf4, 0x39, 0xa1, 0xb2, 0x9d, 0x31, 0xa4, 0x5f, 0xbe, 0x37, 0xe9, 0x2b, 0x5b, 0xa3,
0x98, 0x89, 0xaf, 0xc8, 0xdc, 0xa8, 0x87, 0xbb, 0x02, 0xe5, 0x96, 0x4b, 0xfd, 0x41, 0xeb, 0xa5, 0xec, 0x78, 0x1c, 0x6f, 0x13, 0xb8, 0x20, 0x79, 0x1e, 0x78, 0x8c, 0x87, 0xf4, 0x24, 0xb6, 0x7a,
0xc7, 0x75, 0x2b, 0x3d, 0x9a, 0xb0, 0x7f, 0x6f, 0xc0, 0x72, 0xf2, 0x3b, 0xad, 0x13, 0x91, 0x2e, 0x19, 0x60, 0xb3, 0xcd, 0xbd, 0x63, 0xf2, 0x24, 0xf0, 0xd5, 0x35, 0x5a, 0xc2, 0x89, 0x99, 0xf8,
0xee, 0xc1, 0x1c, 0x8f, 0xeb, 0x98, 0xc5, 0x2c, 0x23, 0x52, 0x10, 0x51, 0xea, 0x60, 0x09, 0x4a, 0x8a, 0xcc, 0x0d, 0x3b, 0xc7, 0xcb, 0x50, 0x6e, 0xb9, 0xd4, 0x3f, 0x69, 0xbd, 0xf4, 0xb8, 0x6e,
0x78, 0x5a, 0x1d, 0x9c, 0x6b, 0xa7, 0xc3, 0x27, 0x3c, 0xfd, 0xbf, 0x12, 0xa0, 0xf4, 0x72, 0x46, 0xe0, 0x87, 0x13, 0xf6, 0xef, 0x0d, 0x58, 0x4a, 0x7e, 0xa7, 0x75, 0x2c, 0xd2, 0xc5, 0x5d, 0x28,
0x6f, 0x9a, 0x6c, 0xee, 0x72, 0x13, 0xcd, 0xdd, 0xf3, 0xc9, 0xe6, 0x4e, 0x5d, 0xcd, 0x77, 0x66, 0xf0, 0xb8, 0x8e, 0x59, 0xcc, 0x32, 0x22, 0x05, 0x11, 0xa5, 0x0e, 0x96, 0xa0, 0x84, 0xa7, 0xd5,
0xd1, 0x64, 0x86, 0x16, 0xef, 0x2e, 0x94, 0xe3, 0xea, 0x26, 0xbe, 0xc0, 0xad, 0xb4, 0xe8, 0x61, 0xc1, 0xb9, 0x7a, 0x3a, 0x7c, 0xcc, 0xd3, 0xff, 0x2f, 0x01, 0x4a, 0x2f, 0x67, 0x74, 0xc4, 0xc9,
0x01, 0x34, 0x62, 0x46, 0xeb, 0xf1, 0x8d, 0xa3, 0xee, 0x3a, 0x14, 0xe7, 0x14, 0x1a, 0xb5, 0x1d, 0x06, 0x31, 0x37, 0xd6, 0x20, 0x3e, 0x1f, 0x6f, 0x10, 0xd5, 0xd5, 0x7c, 0x7b, 0x1a, 0x4d, 0xa6,
0x5d, 0x57, 0xe8, 0x5b, 0xe8, 0xfe, 0xf9, 0xde, 0x2d, 0xe6, 0x26, 0xdf, 0x2c, 0x1a, 0x50, 0xd9, 0x68, 0x13, 0x47, 0xfa, 0x98, 0xc2, 0x0c, 0x7d, 0x0c, 0x5a, 0x8b, 0x6f, 0x1c, 0x75, 0xd7, 0xa1,
0x8e, 0x13, 0xe5, 0x39, 0x1e, 0x2d, 0x92, 0x20, 0xb4, 0xa1, 0x0b, 0x1b, 0x95, 0x9a, 0xaf, 0xa4, 0x38, 0xa7, 0xd0, 0xa8, 0xed, 0xe8, 0xba, 0x42, 0xdf, 0x42, 0xf7, 0x66, 0x7b, 0x2d, 0x29, 0x8c,
0x4d, 0x8c, 0x1f, 0x28, 0x42, 0xaa, 0x2b, 0x9b, 0xa3, 0x8c, 0xd2, 0xb2, 0x2c, 0x1d, 0xb4, 0x39, 0xbf, 0x94, 0x6c, 0x41, 0x65, 0x3b, 0x4e, 0x94, 0x33, 0x3c, 0x95, 0x24, 0x41, 0x68, 0x5d, 0x17,
0x93, 0xef, 0x67, 0xac, 0x2f, 0xd1, 0x67, 0x50, 0xc0, 0x84, 0xf5, 0x7d, 0x2e, 0x5f, 0x42, 0x2a, 0x36, 0x2a, 0x35, 0x5f, 0x4e, 0x9b, 0x18, 0x3f, 0x8b, 0x84, 0x54, 0x57, 0x36, 0x87, 0x19, 0xa5,
0xf5, 0xab, 0x53, 0xa4, 0x2b, 0x26, 0x79, 0x56, 0x35, 0x00, 0xfd, 0x12, 0x8a, 0x6a, 0xc4, 0xcc, 0x65, 0x59, 0x3a, 0x68, 0x63, 0x2a, 0xdf, 0x4f, 0x59, 0x5f, 0xa2, 0x4f, 0xa0, 0x88, 0x09, 0xeb,
0xca, 0xb4, 0x96, 0x3f, 0x43, 0x33, 0x8d, 0xd1, 0x0d, 0x85, 0xa6, 0xc4, 0x71, 0xfc, 0x9c, 0x04, 0xfb, 0x5c, 0xbe, 0xbf, 0x54, 0x1a, 0x57, 0x26, 0x48, 0x57, 0x4c, 0xf2, 0xac, 0x6a, 0x00, 0xfa,
0x44, 0xbf, 0xd0, 0x89, 0xb6, 0x76, 0x1e, 0x27, 0x66, 0x50, 0x1d, 0xe6, 0x39, 0x75, 0xdb, 0xc4, 0x39, 0xcc, 0xab, 0x11, 0x33, 0x2b, 0x93, 0x9e, 0x0d, 0x32, 0x34, 0xd3, 0x18, 0xdd, 0x50, 0x68,
0x5c, 0x98, 0xc1, 0x85, 0x8a, 0x55, 0x24, 0xb6, 0xc8, 0x0b, 0x02, 0xd2, 0x31, 0x17, 0x55, 0xa5, 0x4a, 0x1c, 0xc7, 0x4f, 0x49, 0x40, 0xf4, 0xbb, 0xa0, 0x68, 0x8d, 0xe7, 0x70, 0x62, 0x06, 0x35,
0xa4, 0x28, 0xf4, 0x03, 0x58, 0x0c, 0xfa, 0x3d, 0xd9, 0x2c, 0x74, 0xf6, 0x39, 0x89, 0x98, 0x79, 0x60, 0x8e, 0x53, 0xb7, 0x4d, 0xcc, 0x85, 0x29, 0x5c, 0xa8, 0x58, 0x45, 0x62, 0x8b, 0xbc, 0x20,
0x41, 0x7e, 0x6f, 0x62, 0x16, 0x5d, 0x83, 0x85, 0xa0, 0xdf, 0x3b, 0x10, 0x37, 0xbc, 0x62, 0x5b, 0x20, 0x1d, 0x73, 0x51, 0x55, 0x4a, 0x8a, 0x42, 0xdf, 0x85, 0xc5, 0xa0, 0xdf, 0x93, 0xcd, 0x42,
0x92, 0x6c, 0xe3, 0x93, 0xe8, 0x26, 0x2c, 0x0b, 0x5c, 0xbc, 0xdb, 0x8a, 0x73, 0x59, 0x72, 0xa6, 0x67, 0x8f, 0x93, 0x88, 0x99, 0xe7, 0xe5, 0xf7, 0xc6, 0x66, 0xd1, 0x55, 0x58, 0x08, 0xfa, 0xbd,
0x17, 0xde, 0x43, 0xcf, 0xfc, 0x3e, 0x3a, 0x02, 0xeb, 0x39, 0x54, 0x93, 0xfb, 0x90, 0x81, 0xbd, 0x7d, 0x71, 0xc3, 0x2b, 0xb6, 0x9a, 0x64, 0x1b, 0x9d, 0x44, 0x37, 0x60, 0x49, 0xe0, 0xe2, 0xdd,
0x33, 0xde, 0x71, 0xcf, 0x10, 0x17, 0x89, 0x86, 0xe3, 0x39, 0x7c, 0xef, 0x69, 0xd4, 0x71, 0x39, 0x56, 0x9c, 0x4b, 0x92, 0x33, 0xbd, 0xf0, 0x1e, 0x7a, 0xe6, 0xf7, 0xd1, 0x11, 0x58, 0xcf, 0xa1,
0xc9, 0xca, 0xbc, 0xe9, 0x0c, 0x74, 0x19, 0x0a, 0x7b, 0x6a, 0xa3, 0xd4, 0xcb, 0xa5, 0xa6, 0xc4, 0x9a, 0xdc, 0x87, 0x0c, 0xec, 0xed, 0xd1, 0x8e, 0x7b, 0x8a, 0xb8, 0x48, 0x34, 0x1c, 0xcf, 0xe1,
0x7c, 0x93, 0x08, 0xe7, 0xe9, 0x74, 0xab, 0x29, 0xfb, 0x0a, 0x58, 0x59, 0xe2, 0x95, 0x33, 0xec, 0xdb, 0x4f, 0xa3, 0x8e, 0xcb, 0x49, 0x56, 0xe6, 0x4d, 0x67, 0xa0, 0x4b, 0x50, 0xdc, 0x55, 0x1b,
0x3f, 0xe7, 0x00, 0x46, 0xc1, 0x80, 0x3e, 0x02, 0xe8, 0x91, 0x8e, 0xe7, 0xfe, 0x9a, 0x8f, 0x1a, 0xa5, 0xde, 0x4b, 0x35, 0x25, 0xe6, 0x9b, 0x44, 0x38, 0x4f, 0xa7, 0x5b, 0x4d, 0xd9, 0x97, 0xc1,
0xca, 0xb2, 0x9c, 0x91, 0x5d, 0xe5, 0xa8, 0xf4, 0xcf, 0xbd, 0x73, 0xe9, 0x8f, 0x60, 0x8e, 0x79, 0xca, 0x12, 0xaf, 0x9c, 0x61, 0xff, 0x39, 0x07, 0x30, 0x0c, 0x06, 0xf4, 0x21, 0x40, 0x8f, 0x74,
0xdf, 0x10, 0x5d, 0xa6, 0xc8, 0x31, 0x7a, 0x02, 0x15, 0x37, 0x08, 0x42, 0x2e, 0xc3, 0x38, 0x6e, 0x3c, 0xf7, 0x57, 0x7c, 0xd8, 0x50, 0x96, 0xe5, 0x8c, 0xec, 0x2a, 0x87, 0xa5, 0x7f, 0xee, 0x9d,
0xb6, 0x6f, 0x9d, 0x16, 0xbe, 0xce, 0xd6, 0x88, 0x5f, 0x9d, 0x92, 0xa4, 0x04, 0xeb, 0x3e, 0x2c, 0x4b, 0x7f, 0x04, 0x05, 0xe6, 0x7d, 0x45, 0x74, 0x99, 0x22, 0xc7, 0xe8, 0x09, 0x54, 0xdc, 0x20,
0x4d, 0x32, 0x9c, 0xab, 0x19, 0xfc, 0xd6, 0x80, 0x0b, 0x13, 0x5b, 0x87, 0x3e, 0x1d, 0x66, 0x01, 0x08, 0xb9, 0x0c, 0xe3, 0xb8, 0xd9, 0xbe, 0x79, 0x5a, 0xf8, 0x3a, 0x9b, 0x43, 0x7e, 0x75, 0x4a,
0x63, 0x86, 0xe3, 0x15, 0x27, 0x80, 0x07, 0x50, 0xdd, 0xe2, 0x5c, 0x64, 0x3d, 0x65, 0x9b, 0x6a, 0x92, 0x12, 0xac, 0x7b, 0x50, 0x1b, 0x67, 0x98, 0xa9, 0x19, 0xfc, 0x7b, 0x0e, 0xce, 0x8f, 0x6d,
0xf7, 0x4e, 0xc7, 0x8e, 0x21, 0xec, 0x3f, 0x1a, 0xa3, 0x77, 0xce, 0xcc, 0x9e, 0xff, 0xde, 0x78, 0x1d, 0x7a, 0x00, 0x35, 0x45, 0x8d, 0x3d, 0x90, 0x9c, 0x75, 0xd0, 0x52, 0x28, 0x74, 0x1f, 0xaa,
0xcf, 0x7f, 0x7d, 0xfa, 0xe5, 0xf0, 0x3e, 0x5b, 0xfd, 0x1b, 0x3f, 0x87, 0x0f, 0x33, 0x2f, 0x66, 0x9b, 0x9c, 0x8b, 0x4c, 0xa8, 0xec, 0x55, 0x2d, 0xe0, 0xe9, 0x52, 0x46, 0x10, 0xe8, 0xc1, 0x30,
0x54, 0x81, 0xe2, 0xfe, 0xc1, 0x16, 0x3e, 0x68, 0x35, 0x97, 0x3e, 0x40, 0x55, 0x28, 0x6d, 0x3f, 0xad, 0xe4, 0x27, 0x35, 0xfa, 0x63, 0xfa, 0x67, 0xe7, 0x14, 0xeb, 0x17, 0x93, 0x83, 0x3c, 0xaf,
0xd9, 0xdd, 0x7b, 0xdc, 0x3a, 0x68, 0x2d, 0x19, 0x62, 0xa9, 0xd9, 0x12, 0xe3, 0xe6, 0x52, 0xae, 0xbc, 0xd4, 0x18, 0x0d, 0xf2, 0x33, 0xb2, 0xca, 0xd0, 0x87, 0x7f, 0x34, 0xa0, 0x14, 0x1f, 0xc2,
0xfe, 0x6d, 0x01, 0x8a, 0xdb, 0xea, 0xbf, 0x1e, 0x74, 0x00, 0xe5, 0xe1, 0x9f, 0x00, 0xc8, 0xce, 0xcc, 0xb7, 0x8a, 0xbb, 0xa3, 0x6f, 0x15, 0xd7, 0x26, 0x5f, 0x6a, 0xef, 0xf3, 0x89, 0xe2, 0xfa,
0xf0, 0xce, 0xc4, 0xbf, 0x09, 0xd6, 0xc7, 0xa7, 0xf2, 0xe8, 0xc4, 0xfd, 0x10, 0xe6, 0xe5, 0xdf, 0x4f, 0xe1, 0x83, 0xcc, 0x82, 0x02, 0x55, 0x60, 0x7e, 0x6f, 0x7f, 0x13, 0xef, 0xb7, 0x9a, 0xb5,
0x21, 0x28, 0xa3, 0xbd, 0x4e, 0xfe, 0x4f, 0x62, 0x9d, 0xfe, 0xf7, 0xc2, 0x86, 0x21, 0x24, 0xc9, 0x73, 0xa8, 0x0a, 0xa5, 0xed, 0x27, 0x8f, 0x77, 0x1f, 0xb5, 0xf6, 0x5b, 0x35, 0x43, 0x2c, 0x35,
0xb7, 0x89, 0x2c, 0x49, 0xc9, 0xc7, 0x4b, 0x6b, 0xf5, 0x8c, 0x47, 0x0d, 0xb4, 0x0b, 0x05, 0xdd, 0x5b, 0x62, 0xdc, 0xac, 0xe5, 0x1a, 0xbf, 0x2b, 0xc2, 0xfc, 0xb6, 0xfa, 0x67, 0x0c, 0xed, 0x43,
0xb0, 0x65, 0xb1, 0x26, 0x5f, 0x20, 0xac, 0xb5, 0xe9, 0x0c, 0x4a, 0xd8, 0x86, 0x81, 0x76, 0x87, 0x79, 0xf0, 0x97, 0x09, 0xb2, 0x33, 0x5c, 0x33, 0xf6, 0xdf, 0x8b, 0xf5, 0xd1, 0xa9, 0x3c, 0xfa,
0xef, 0xd1, 0x59, 0xaa, 0x25, 0xab, 0x5d, 0xeb, 0x8c, 0xf5, 0x75, 0x63, 0xc3, 0x40, 0x5f, 0x41, 0xc2, 0x79, 0x00, 0x73, 0xf2, 0xcf, 0x23, 0x94, 0xf1, 0x2c, 0x90, 0xfc, 0x57, 0xc9, 0x3a, 0xfd,
0x25, 0x51, 0xcf, 0xa2, 0x8c, 0x6a, 0x2a, 0x5d, 0x1c, 0x5b, 0xd7, 0xcf, 0xe0, 0xd2, 0x96, 0xb7, 0xcf, 0x98, 0x75, 0x43, 0x48, 0x92, 0x6f, 0x2a, 0x59, 0x92, 0x92, 0x0f, 0xb7, 0xd6, 0xca, 0x19,
0x60, 0x4e, 0x1e, 0xa4, 0x0c, 0x67, 0x27, 0xca, 0xdd, 0x2c, 0x35, 0xc7, 0xca, 0xdf, 0x43, 0x55, 0x8f, 0x31, 0xe8, 0x31, 0x14, 0x75, 0xa3, 0x99, 0xc5, 0x9a, 0x7c, 0x39, 0xb1, 0x56, 0x27, 0x33,
0xa0, 0x93, 0x20, 0x19, 0x7d, 0xe8, 0xfa, 0x59, 0xf7, 0xea, 0xd4, 0xb0, 0x49, 0x05, 0xf1, 0x86, 0x28, 0x61, 0xeb, 0x06, 0x7a, 0x3c, 0x78, 0x8b, 0xcf, 0x52, 0x2d, 0x59, 0xa5, 0x5b, 0x67, 0xac,
0x81, 0x42, 0x40, 0xe9, 0xe4, 0x89, 0x7e, 0x94, 0x11, 0x25, 0xd3, 0x32, 0xb8, 0x75, 0x73, 0x36, 0xaf, 0x19, 0xeb, 0x06, 0xfa, 0x02, 0x2a, 0x89, 0x3a, 0x1c, 0x65, 0x54, 0x81, 0xe9, 0xa2, 0xde,
0x66, 0x65, 0x54, 0xa3, 0xfa, 0xea, 0xcd, 0x8a, 0xf1, 0x8f, 0x37, 0x2b, 0xc6, 0xbf, 0xde, 0xac, 0xba, 0x76, 0x06, 0x97, 0xb6, 0xbc, 0x05, 0x05, 0x99, 0x00, 0x32, 0x9c, 0x9d, 0x28, 0xd3, 0xb3,
0x18, 0x87, 0x05, 0x59, 0x31, 0xfd, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xb8, 0xc3, 0xd4, 0x1c, 0x29, 0xdb, 0x0f, 0x54, 0x63, 0x41, 0x82, 0x64, 0xf4, 0xa1, 0x6b, 0x67, 0xd5, 0x03,
0x68, 0x0b, 0x1d, 0x00, 0x00, 0x13, 0xc3, 0x26, 0x15, 0xc4, 0xeb, 0x06, 0x0a, 0x01, 0xa5, 0x93, 0x3e, 0xfa, 0x7e, 0x46, 0x94,
0x4c, 0xba, 0x79, 0xac, 0x1b, 0xd3, 0x31, 0x2b, 0xa3, 0xb6, 0xaa, 0xaf, 0xde, 0x2c, 0x1b, 0xff,
0x7c, 0xb3, 0x6c, 0xfc, 0xfb, 0xcd, 0xb2, 0x71, 0x50, 0x94, 0x95, 0xde, 0x0f, 0xbe, 0x09, 0x00,
0x00, 0xff, 0xff, 0x16, 0xc8, 0xe5, 0x4c, 0x39, 0x1e, 0x00, 0x00,
} }
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
@ -2892,6 +2918,20 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized) i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized)
} }
if len(m.Exporters) > 0 {
for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exporters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintControl(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6a
}
}
if m.SourcePolicy != nil { if m.SourcePolicy != nil {
{ {
size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i]) size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i])
@ -2992,9 +3032,9 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i-- i--
dAtA[i] = 0x2a dAtA[i] = 0x2a
} }
if len(m.ExporterAttrs) > 0 { if len(m.ExporterAttrsDeprecated) > 0 {
for k := range m.ExporterAttrs { for k := range m.ExporterAttrsDeprecated {
v := m.ExporterAttrs[k] v := m.ExporterAttrsDeprecated[k]
baseI := i baseI := i
i -= len(v) i -= len(v)
copy(dAtA[i:], v) copy(dAtA[i:], v)
@ -3011,10 +3051,10 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
dAtA[i] = 0x22 dAtA[i] = 0x22
} }
} }
if len(m.Exporter) > 0 { if len(m.ExporterDeprecated) > 0 {
i -= len(m.Exporter) i -= len(m.ExporterDeprecated)
copy(dAtA[i:], m.Exporter) copy(dAtA[i:], m.ExporterDeprecated)
i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) i = encodeVarintControl(dAtA, i, uint64(len(m.ExporterDeprecated)))
i-- i--
dAtA[i] = 0x1a dAtA[i] = 0x1a
} }
@ -4339,6 +4379,30 @@ func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized) i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized)
} }
if len(m.Results) > 0 {
for k := range m.Results {
v := m.Results[k]
baseI := i
if v != nil {
{
size, err := v.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintControl(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
i = encodeVarintControl(dAtA, i, uint64(k))
i--
dAtA[i] = 0x8
i = encodeVarintControl(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x1a
}
}
if len(m.Attestations) > 0 { if len(m.Attestations) > 0 {
for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- { for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- {
{ {
@ -4353,9 +4417,9 @@ func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
dAtA[i] = 0x12 dAtA[i] = 0x12
} }
} }
if m.Result != nil { if m.ResultDeprecated != nil {
{ {
size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) size, err := m.ResultDeprecated.MarshalToSizedBuffer(dAtA[:i])
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -4564,12 +4628,12 @@ func (m *SolveRequest) Size() (n int) {
l = m.Definition.Size() l = m.Definition.Size()
n += 1 + l + sovControl(uint64(l)) n += 1 + l + sovControl(uint64(l))
} }
l = len(m.Exporter) l = len(m.ExporterDeprecated)
if l > 0 { if l > 0 {
n += 1 + l + sovControl(uint64(l)) n += 1 + l + sovControl(uint64(l))
} }
if len(m.ExporterAttrs) > 0 { if len(m.ExporterAttrsDeprecated) > 0 {
for k, v := range m.ExporterAttrs { for k, v := range m.ExporterAttrsDeprecated {
_ = k _ = k
_ = v _ = v
mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
@ -4620,6 +4684,12 @@ func (m *SolveRequest) Size() (n int) {
l = m.SourcePolicy.Size() l = m.SourcePolicy.Size()
n += 1 + l + sovControl(uint64(l)) n += 1 + l + sovControl(uint64(l))
} }
if len(m.Exporters) > 0 {
for _, e := range m.Exporters {
l = e.Size()
n += 1 + l + sovControl(uint64(l))
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -5203,8 +5273,8 @@ func (m *BuildResultInfo) Size() (n int) {
} }
var l int var l int
_ = l _ = l
if m.Result != nil { if m.ResultDeprecated != nil {
l = m.Result.Size() l = m.ResultDeprecated.Size()
n += 1 + l + sovControl(uint64(l)) n += 1 + l + sovControl(uint64(l))
} }
if len(m.Attestations) > 0 { if len(m.Attestations) > 0 {
@ -5213,6 +5283,19 @@ func (m *BuildResultInfo) Size() (n int) {
n += 1 + l + sovControl(uint64(l)) n += 1 + l + sovControl(uint64(l))
} }
} }
if len(m.Results) > 0 {
for k, v := range m.Results {
_ = k
_ = v
l = 0
if v != nil {
l = v.Size()
l += 1 + sovControl(uint64(l))
}
mapEntrySize := 1 + sovControl(uint64(k)) + l
n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -6035,7 +6118,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex iNdEx = postIndex
case 3: case 3:
if wireType != 2 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) return fmt.Errorf("proto: wrong wireType = %d for field ExporterDeprecated", wireType)
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -6063,11 +6146,11 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Exporter = string(dAtA[iNdEx:postIndex]) m.ExporterDeprecated = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex iNdEx = postIndex
case 4: case 4:
if wireType != 2 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrsDeprecated", wireType)
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -6094,8 +6177,8 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
if m.ExporterAttrs == nil { if m.ExporterAttrsDeprecated == nil {
m.ExporterAttrs = make(map[string]string) m.ExporterAttrsDeprecated = make(map[string]string)
} }
var mapkey string var mapkey string
var mapvalue string var mapvalue string
@ -6190,7 +6273,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
iNdEx += skippy iNdEx += skippy
} }
} }
m.ExporterAttrs[mapkey] = mapvalue m.ExporterAttrsDeprecated[mapkey] = mapvalue
iNdEx = postIndex iNdEx = postIndex
case 5: case 5:
if wireType != 2 { if wireType != 2 {
@ -6633,6 +6716,40 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exporters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Exporters = append(m.Exporters, &Exporter{})
if err := m.Exporters[len(m.Exporters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:]) skippy, err := skipControl(dAtA[iNdEx:])
@ -10589,7 +10706,7 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) return fmt.Errorf("proto: wrong wireType = %d for field ResultDeprecated", wireType)
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -10616,10 +10733,10 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
if m.Result == nil { if m.ResultDeprecated == nil {
m.Result = &Descriptor{} m.ResultDeprecated = &Descriptor{}
} }
if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { if err := m.ResultDeprecated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
@ -10657,6 +10774,121 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Results == nil {
m.Results = make(map[int64]*Descriptor)
}
var mapkey int64
var mapvalue *Descriptor
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapkey |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthControl
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthControl
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &Descriptor{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipControl(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthControl
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Results[mapkey] = mapvalue
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:]) skippy, err := skipControl(dAtA[iNdEx:])

View file

@ -2,13 +2,13 @@ syntax = "proto3";
package moby.buildkit.v1; package moby.buildkit.v1;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
// import "github.com/containerd/containerd/api/types/descriptor.proto"; // import "github.com/containerd/containerd/api/types/descriptor.proto";
import "github.com/gogo/googleapis/google/rpc/status.proto"; import "github.com/gogo/googleapis/google/rpc/status.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
import "google/protobuf/timestamp.proto";
option (gogoproto.sizer_all) = true; option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true; option (gogoproto.marshaler_all) = true;
@ -60,8 +60,11 @@ message UsageRecord {
message SolveRequest { message SolveRequest {
string Ref = 1; string Ref = 1;
pb.Definition Definition = 2; pb.Definition Definition = 2;
string Exporter = 3; // ExporterDeprecated and ExporterAttrsDeprecated are deprecated in favor
map<string, string> ExporterAttrs = 4; // of the new Exporters. If these fields are set, then they will be
// appended to the Exporters field if Exporters was not explicitly set.
string ExporterDeprecated = 3;
map<string, string> ExporterAttrsDeprecated = 4;
string Session = 5; string Session = 5;
string Frontend = 6; string Frontend = 6;
map<string, string> FrontendAttrs = 7; map<string, string> FrontendAttrs = 7;
@ -70,6 +73,7 @@ message SolveRequest {
map<string, pb.Definition> FrontendInputs = 10; map<string, pb.Definition> FrontendInputs = 10;
bool Internal = 11; // Internal builds are not recorded in build history bool Internal = 11; // Internal builds are not recorded in build history
moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12; moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12;
repeated Exporter Exporters = 13;
} }
message CacheOptions { message CacheOptions {
@ -227,11 +231,15 @@ message Descriptor {
} }
message BuildResultInfo { message BuildResultInfo {
Descriptor Result = 1; Descriptor ResultDeprecated = 1;
repeated Descriptor Attestations = 2; repeated Descriptor Attestations = 2;
map<int64, Descriptor> Results = 3;
} }
// Exporter describes the output exporter
message Exporter { message Exporter {
// Type identifies the exporter
string Type = 1; string Type = 1;
// Attrs specifies exporter configuration
map<string, string> Attrs = 2; map<string, string> Attrs = 2;
} }

View file

@ -8,12 +8,15 @@ import (
"github.com/containerd/containerd/diff" "github.com/containerd/containerd/diff"
"github.com/containerd/containerd/diff/walking" "github.com/containerd/containerd/diff/walking"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/converter"
"github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/winlayers" "github.com/moby/buildkit/util/winlayers"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
imagespecidentity "github.com/opencontainers/image-spec/identity" imagespecidentity "github.com/opencontainers/image-spec/identity"
@ -22,11 +25,9 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var g flightcontrol.Group[struct{}] var g flightcontrol.Group[*leaseutil.LeaseRef]
var gFileList flightcontrol.Group[[]string] var gFileList flightcontrol.Group[[]string]
const containerdUncompressed = "containerd.io/uncompressed"
var ErrNoBlobs = errors.Errorf("no blobs for snapshot") var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
// computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If // computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If
@ -87,14 +88,24 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
if _, ok := filter[sr.ID()]; ok { if _, ok := filter[sr.ID()]; ok {
eg.Go(func() error { eg.Go(func() error {
_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (struct{}, error) { l, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (_ *leaseutil.LeaseRef, err error) {
if sr.getBlob() != "" { if sr.getBlob() != "" {
return struct{}{}, nil return nil, nil
} }
if !createIfNeeded { if !createIfNeeded {
return struct{}{}, errors.WithStack(ErrNoBlobs) return nil, errors.WithStack(ErrNoBlobs)
} }
l, ctx, err := leaseutil.NewLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
l.Discard()
}
}()
compressorFunc, finalize := comp.Type.Compress(ctx, comp) compressorFunc, finalize := comp.Type.Compress(ctx, comp)
mediaType := comp.Type.MediaType() mediaType := comp.Type.MediaType()
@ -109,12 +120,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
if lowerRef != nil { if lowerRef != nil {
m, err := lowerRef.Mount(ctx, true, s) m, err := lowerRef.Mount(ctx, true, s)
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
var release func() error var release func() error
lower, release, err = m.Mount() lower, release, err = m.Mount()
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
if release != nil { if release != nil {
defer release() defer release()
@ -132,12 +143,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
if upperRef != nil { if upperRef != nil {
m, err := upperRef.Mount(ctx, true, s) m, err := upperRef.Mount(ctx, true, s)
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
var release func() error var release func() error
upper, release, err = m.Mount() upper, release, err = m.Mount()
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
if release != nil { if release != nil {
defer release() defer release()
@ -145,14 +156,13 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
} }
var desc ocispecs.Descriptor var desc ocispecs.Descriptor
var err error
// Determine differ and error/log handling according to the platform, envvar and the snapshotter. // Determine differ and error/log handling according to the platform, envvar and the snapshotter.
var enableOverlay, fallback, logWarnOnErr bool var enableOverlay, fallback, logWarnOnErr bool
if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff { if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff {
enableOverlay, err = strconv.ParseBool(forceOvlStr) enableOverlay, err = strconv.ParseBool(forceOvlStr)
if err != nil { if err != nil {
return struct{}{}, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
} }
fallback = false // prohibit fallback on debug fallback = false // prohibit fallback on debug
} else if !isTypeWindows(sr) { } else if !isTypeWindows(sr) {
@ -174,10 +184,10 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
if !ok || err != nil { if !ok || err != nil {
if !fallback { if !fallback {
if !ok { if !ok {
return struct{}{}, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
} }
if err != nil { if err != nil {
return struct{}{}, errors.Wrapf(err, "failed to compute overlay diff") return nil, errors.Wrapf(err, "failed to compute overlay diff")
} }
} }
if logWarnOnErr { if logWarnOnErr {
@ -189,7 +199,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
} }
} }
if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() { if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf(comp) {
// These compression types aren't supported by containerd differ. So try to compute diff on buildkit side. // These compression types aren't supported by containerd differ. So try to compute diff on buildkit side.
// This case can be happen on containerd worker + non-overlayfs snapshotter (e.g. native). // This case can be happen on containerd worker + non-overlayfs snapshotter (e.g. native).
// See also: https://github.com/containerd/containerd/issues/4263 // See also: https://github.com/containerd/containerd/issues/4263
@ -210,7 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
diff.WithCompressor(compressorFunc), diff.WithCompressor(compressorFunc),
) )
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
} }
@ -220,7 +230,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
if finalize != nil { if finalize != nil {
a, err := finalize(ctx, sr.cm.ContentStore) a, err := finalize(ctx, sr.cm.ContentStore)
if err != nil { if err != nil {
return struct{}{}, errors.Wrapf(err, "failed to finalize compression") return nil, errors.Wrapf(err, "failed to finalize compression")
} }
for k, v := range a { for k, v := range a {
desc.Annotations[k] = v desc.Annotations[k] = v
@ -228,26 +238,32 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
} }
info, err := sr.cm.ContentStore.Info(ctx, desc.Digest) info, err := sr.cm.ContentStore.Info(ctx, desc.Digest)
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
if diffID, ok := info.Labels[containerdUncompressed]; ok { if diffID, ok := info.Labels[labels.LabelUncompressed]; ok {
desc.Annotations[containerdUncompressed] = diffID desc.Annotations[labels.LabelUncompressed] = diffID
} else if mediaType == ocispecs.MediaTypeImageLayer { } else if mediaType == ocispecs.MediaTypeImageLayer {
desc.Annotations[containerdUncompressed] = desc.Digest.String() desc.Annotations[labels.LabelUncompressed] = desc.Digest.String()
} else { } else {
return struct{}{}, errors.Errorf("unknown layer compression type") return nil, errors.Errorf("unknown layer compression type")
} }
if err := sr.setBlob(ctx, desc); err != nil { if err := sr.setBlob(ctx, desc); err != nil {
return struct{}{}, err return nil, err
} }
return struct{}{}, nil return l, nil
}) })
if err != nil { if err != nil {
return err return err
} }
if l != nil {
if err := l.Adopt(ctx); err != nil {
return err
}
}
if comp.Force { if comp.Force {
if err := ensureCompression(ctx, sr, comp, s); err != nil { if err := ensureCompression(ctx, sr, comp, s); err != nil {
return errors.Wrapf(err, "failed to ensure compression type of %q", comp.Type) return errors.Wrapf(err, "failed to ensure compression type of %q", comp.Type)
@ -416,29 +432,42 @@ func isTypeWindows(sr *immutableRef) bool {
// ensureCompression ensures the specified ref has the blob of the specified compression Type. // ensureCompression ensures the specified ref has the blob of the specified compression Type.
func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
_, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (struct{}, error) { l, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (_ *leaseutil.LeaseRef, err error) {
desc, err := ref.ociDesc(ctx, ref.descHandlers, true) desc, err := ref.ociDesc(ctx, ref.descHandlers, true)
if err != nil { if err != nil {
return struct{}{}, err return nil, err
} }
// Resolve converters l, ctx, err := leaseutil.NewLease(ctx, ref.cm.LeaseManager, leaseutil.MakeTemporary)
layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp)
if err != nil { if err != nil {
return struct{}{}, err return nil, err
}
defer func() {
if err != nil {
l.Discard()
}
}()
// Resolve converters
layerConvertFunc, err := converter.New(ctx, ref.cm.ContentStore, desc, comp)
if err != nil {
return nil, err
} else if layerConvertFunc == nil { } else if layerConvertFunc == nil {
if isLazy, err := ref.isLazy(ctx); err != nil { if isLazy, err := ref.isLazy(ctx); err != nil {
return struct{}{}, err return nil, err
} else if isLazy { } else if isLazy {
// This ref can be used as the specified compressionType. Keep it lazy. // This ref can be used as the specified compressionType. Keep it lazy.
return struct{}{}, nil return l, nil
} }
return struct{}{}, ref.linkBlob(ctx, desc) if err := ref.linkBlob(ctx, desc); err != nil {
return nil, err
}
return l, nil
} }
// First, lookup local content store // First, lookup local content store
if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil { if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil {
return struct{}{}, nil // found the compression variant. no need to convert. return l, nil // found the compression variant. no need to convert.
} }
// Convert layer compression type // Convert layer compression type
@ -448,18 +477,26 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.
dh: ref.descHandlers[desc.Digest], dh: ref.descHandlers[desc.Digest],
session: s, session: s,
}).Unlazy(ctx); err != nil { }).Unlazy(ctx); err != nil {
return struct{}{}, err return l, err
} }
newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc) newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc)
if err != nil { if err != nil {
return struct{}{}, errors.Wrapf(err, "failed to convert") return nil, errors.Wrapf(err, "failed to convert")
} }
// Start to track converted layer // Start to track converted layer
if err := ref.linkBlob(ctx, *newDesc); err != nil { if err := ref.linkBlob(ctx, *newDesc); err != nil {
return struct{}{}, errors.Wrapf(err, "failed to add compression blob") return nil, errors.Wrapf(err, "failed to add compression blob")
} }
return struct{}{}, nil return l, nil
}) })
return err if err != nil {
return err
}
if l != nil {
if err := l.Adopt(ctx); err != nil {
return err
}
}
return nil
} }

View file

@ -10,6 +10,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
labelspkg "github.com/containerd/containerd/labels"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
@ -42,14 +43,27 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
if err != nil { if err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to open writer") return emptyDesc, false, errors.Wrap(err, "failed to open writer")
} }
defer func() { defer func() {
if cw != nil { if cw != nil {
// after commit success cw will be set to nil, if cw isn't nil, error
// happened before commit, we should abort this ingest, and because the
// error may incured by ctx cancel, use a new context here. And since
// cm.Close will unlock this ref in the content store, we invoke abort
// to remove the ingest root in advance.
if aerr := sr.cm.ContentStore.Abort(context.Background(), ref); aerr != nil {
bklog.G(ctx).WithError(aerr).Warnf("failed to abort writer %q", ref)
}
if cerr := cw.Close(); cerr != nil { if cerr := cw.Close(); cerr != nil {
bklog.G(ctx).WithError(cerr).Warnf("failed to close writer %q", ref) bklog.G(ctx).WithError(cerr).Warnf("failed to close writer %q", ref)
} }
} }
}() }()
if err = cw.Truncate(0); err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to truncate writer")
}
bufW := bufio.NewWriterSize(cw, 128*1024) bufW := bufio.NewWriterSize(cw, 128*1024)
var labels map[string]string var labels map[string]string
if compressorFunc != nil { if compressorFunc != nil {
@ -69,7 +83,7 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
if labels == nil { if labels == nil {
labels = map[string]string{} labels = map[string]string{}
} }
labels[containerdUncompressed] = dgstr.Digest().String() labels[labelspkg.LabelUncompressed] = dgstr.Digest().String()
} else { } else {
if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower); err != nil { if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower); err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to write diff") return emptyDesc, false, errors.Wrap(err, "failed to write diff")
@ -101,9 +115,9 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
cinfo.Labels = make(map[string]string) cinfo.Labels = make(map[string]string)
} }
// Set uncompressed label if digest already existed without label // Set uncompressed label if digest already existed without label
if _, ok := cinfo.Labels[containerdUncompressed]; !ok { if _, ok := cinfo.Labels[labelspkg.LabelUncompressed]; !ok {
cinfo.Labels[containerdUncompressed] = labels[containerdUncompressed] cinfo.Labels[labelspkg.LabelUncompressed] = labels[labelspkg.LabelUncompressed]
if _, err := sr.cm.ContentStore.Update(ctx, cinfo, "labels."+containerdUncompressed); err != nil { if _, err := sr.cm.ContentStore.Update(ctx, cinfo, "labels."+labelspkg.LabelUncompressed); err != nil {
return emptyDesc, false, errors.Wrap(err, "error setting uncompressed label") return emptyDesc, false, errors.Wrap(err, "error setting uncompressed label")
} }
} }

View file

@ -6,8 +6,8 @@ package cache
import ( import (
"context" "context"
"github.com/moby/buildkit/util/compression"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/moby/buildkit/util/compression"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
) )

View file

@ -1,16 +0,0 @@
//go:build !nydus
// +build !nydus
package cache
import (
"context"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/cache/config"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
return refCfg.Compression.Force
}

View file

@ -10,7 +10,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/moby/buildkit/cache/config" "github.com/containerd/containerd/labels"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
@ -27,20 +27,6 @@ func init() {
) )
} }
// Nydus compression type can't be mixed with other compression types in the same image,
// so if `source` is this kind of layer, but the target is other compression type, we
// should do the forced compression.
func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
if refCfg.Compression.Force {
return true
}
isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source)
if refCfg.Compression.Type == compression.Nydus {
return !isNydusBlob
}
return isNydusBlob
}
// MergeNydus does two steps: // MergeNydus does two steps:
// 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer. // 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer.
// 2. Merge all nydus bootstraps into a final bootstrap (will as an extra layer). // 2. Merge all nydus bootstraps into a final bootstrap (will as an extra layer).
@ -58,7 +44,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
// Extracts nydus bootstrap from nydus format for each layer. // Extracts nydus bootstrap from nydus format for each layer.
var cm *cacheManager var cm *cacheManager
layers := []converter.Layer{} layers := []converter.Layer{}
blobIDs := []string{}
for _, ref := range refs { for _, ref := range refs {
blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s)
if err != nil { if err != nil {
@ -72,7 +57,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
if cm == nil { if cm == nil {
cm = ref.cm cm = ref.cm
} }
blobIDs = append(blobIDs, blobDesc.Digest.Hex())
layers = append(layers, converter.Layer{ layers = append(layers, converter.Layer{
Digest: blobDesc.Digest, Digest: blobDesc.Digest,
ReaderAt: ra, ReaderAt: ra,
@ -109,7 +93,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
compressedDgst := cw.Digest() compressedDgst := cw.Digest()
if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{
containerdUncompressed: uncompressedDgst.Digest().String(), labels.LabelUncompressed: uncompressedDgst.Digest().String(),
})); err != nil { })); err != nil {
if !errdefs.IsAlreadyExists(err) { if !errdefs.IsAlreadyExists(err) {
return nil, errors.Wrap(err, "commit to content store") return nil, errors.Wrap(err, "commit to content store")
@ -129,7 +113,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
Size: info.Size, Size: info.Size,
MediaType: ocispecs.MediaTypeImageLayerGzip, MediaType: ocispecs.MediaTypeImageLayerGzip,
Annotations: map[string]string{ Annotations: map[string]string{
containerdUncompressed: uncompressedDgst.Digest().String(), labels.LabelUncompressed: uncompressedDgst.Digest().String(),
// Use this annotation to identify nydus bootstrap layer. // Use this annotation to identify nydus bootstrap layer.
converter.LayerAnnotationNydusBootstrap: "true", converter.LayerAnnotationNydusBootstrap: "true",
}, },

View file

@ -20,8 +20,8 @@ func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error {
stat.Gid = s.Gid stat.Gid = s.Gid
if !fi.IsDir() { if !fi.IsDir() {
if s.Mode&syscall.S_IFBLK != 0 || if s.Mode&syscall.S_IFLNK == 0 && (s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 { s.Mode&syscall.S_IFCHR != 0) {
stat.Devmajor = int64(unix.Major(uint64(s.Rdev))) stat.Devmajor = int64(unix.Major(uint64(s.Rdev)))
stat.Devminor = int64(unix.Minor(uint64(s.Rdev))) stat.Devminor = int64(unix.Minor(uint64(s.Rdev)))
} }

View file

@ -35,7 +35,7 @@ func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string
} }
// lazy blobs need to be pulled first // lazy blobs need to be pulled first
if err := sr.Extract(ctx, s); err != nil { if err := sr.ensureLocalContentBlob(ctx, s); err != nil {
return nil, err return nil, err
} }

View file

@ -13,6 +13,7 @@ import (
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters" "github.com/containerd/containerd/filters"
"github.com/containerd/containerd/gc" "github.com/containerd/containerd/gc"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/cache/metadata"
@ -36,6 +37,8 @@ var (
errInvalid = errors.New("invalid") errInvalid = errors.New("invalid")
) )
const maxPruneBatch = 10 // maximum number of refs to prune while holding the manager lock
type ManagerOpt struct { type ManagerOpt struct {
Snapshotter snapshot.Snapshotter Snapshotter snapshot.Snapshotter
ContentStore content.Store ContentStore content.Store
@ -300,7 +303,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
ref := rec.ref(true, descHandlers, nil) ref := rec.ref(true, descHandlers, nil)
if s := unlazySessionOf(opts...); s != nil { if s := unlazySessionOf(opts...); s != nil {
if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil { if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true, false); err != nil {
return nil, err return nil, err
} }
} }
@ -321,6 +324,7 @@ func (cm *cacheManager) init(ctx context.Context) error {
bklog.G(ctx).Debugf("could not load snapshot %s: %+v", si.ID(), err) bklog.G(ctx).Debugf("could not load snapshot %s: %+v", si.ID(), err)
cm.MetadataStore.Clear(si.ID()) cm.MetadataStore.Clear(si.ID())
cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()}) cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()})
cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID() + "-variants"})
} }
} }
return nil return nil
@ -1055,7 +1059,7 @@ func (cm *cacheManager) pruneOnce(ctx context.Context, ch chan client.UsageInfo,
}) })
} }
func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) error { func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) (err error) {
var toDelete []*deleteRecord var toDelete []*deleteRecord
if opt.keepBytes != 0 && opt.totalSize < opt.keepBytes { if opt.keepBytes != 0 && opt.totalSize < opt.keepBytes {
@ -1128,49 +1132,50 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
lastUsedAt: c.LastUsedAt, lastUsedAt: c.LastUsedAt,
usageCount: c.UsageCount, usageCount: c.UsageCount,
}) })
if !gcMode { locked[cr.mu] = struct{}{}
cr.dead = true continue // leave the record locked
// mark metadata as deleted in case we crash before cleanup finished
if err := cr.queueDeleted(); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
if err := cr.commitMetadata(); err != nil {
cr.mu.Unlock()
cm.mu.Unlock()
return err
}
} else {
locked[cr.mu] = struct{}{}
continue // leave the record locked
}
} }
} }
cr.mu.Unlock() cr.mu.Unlock()
} }
batchSize := len(toDelete)
if gcMode && len(toDelete) > 0 { if gcMode && len(toDelete) > 0 {
batchSize = 1
sortDeleteRecords(toDelete) sortDeleteRecords(toDelete)
var err error } else if batchSize > maxPruneBatch {
for i, cr := range toDelete { batchSize = maxPruneBatch
// only remove single record at a time
if i == 0 {
cr.dead = true
err = cr.queueDeleted()
if err == nil {
err = cr.commitMetadata()
}
}
cr.mu.Unlock()
}
if err != nil {
return err
}
toDelete = toDelete[:1]
} }
releaseLocks := func() {
for _, cr := range toDelete {
if !cr.released {
cr.released = true
cr.mu.Unlock()
}
}
cm.mu.Unlock()
}
for i, cr := range toDelete {
// only remove single record at a time
if i < batchSize {
cr.dead = true
// mark metadata as deleted in case we crash before cleanup finished
if err := cr.queueDeleted(); err != nil {
releaseLocks()
return err
}
if err := cr.commitMetadata(); err != nil {
releaseLocks()
return err
}
}
cr.mu.Unlock()
cr.released = true
}
toDelete = toDelete[:batchSize]
cm.mu.Unlock() cm.mu.Unlock()
if len(toDelete) == 0 { if len(toDelete) == 0 {
@ -1193,7 +1198,6 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
} }
cm.mu.Lock() cm.mu.Lock()
var err error
for _, cr := range toDelete { for _, cr := range toDelete {
cr.mu.Lock() cr.mu.Lock()
@ -1254,7 +1258,7 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return context.Cause(ctx)
default: default:
return cm.prune(ctx, ch, opt) return cm.prune(ctx, ch, opt)
} }
@ -1611,6 +1615,7 @@ type deleteRecord struct {
usageCount int usageCount int
lastUsedAtIndex int lastUsedAtIndex int
usageCountIndex int usageCountIndex int
released bool
} }
func sortDeleteRecords(toDelete []*deleteRecord) { func sortDeleteRecords(toDelete []*deleteRecord) {
@ -1657,7 +1662,7 @@ func sortDeleteRecords(toDelete []*deleteRecord) {
} }
func diffIDFromDescriptor(desc ocispecs.Descriptor) (digest.Digest, error) { func diffIDFromDescriptor(desc ocispecs.Descriptor) (digest.Digest, error) {
diffIDStr, ok := desc.Annotations["containerd.io/uncompressed"] diffIDStr, ok := desc.Annotations[labels.LabelUncompressed]
if !ok { if !ok {
return "", errors.Errorf("missing uncompressed annotation for %s", desc.Digest) return "", errors.Errorf("missing uncompressed annotation for %s", desc.Digest)
} }

View file

@ -12,6 +12,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/pkg/userns" "github.com/containerd/containerd/pkg/userns"
@ -39,7 +40,7 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed) var additionalAnnotations = append(compression.EStargzAnnotations, labels.LabelUncompressed)
// Ref is a reference to cacheable objects. // Ref is a reference to cacheable objects.
type Ref interface { type Ref interface {
@ -443,7 +444,7 @@ func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr er
"id": cr.ID(), "id": cr.ID(),
"refCount": len(cr.refs), "refCount": len(cr.refs),
"removeSnapshot": removeSnapshot, "removeSnapshot": removeSnapshot,
"stack": bklog.LazyStackTrace{}, "stack": bklog.TraceLevelOnlyStack(),
}) })
if rerr != nil { if rerr != nil {
l = l.WithError(rerr) l = l.WithError(rerr)
@ -487,7 +488,7 @@ func (sr *immutableRef) traceLogFields() logrus.Fields {
"refID": fmt.Sprintf("%p", sr), "refID": fmt.Sprintf("%p", sr),
"newRefCount": len(sr.refs), "newRefCount": len(sr.refs),
"mutable": false, "mutable": false,
"stack": bklog.LazyStackTrace{}, "stack": bklog.TraceLevelOnlyStack(),
} }
if sr.equalMutable != nil { if sr.equalMutable != nil {
m["equalMutableID"] = sr.equalMutable.ID() m["equalMutableID"] = sr.equalMutable.ID()
@ -627,7 +628,7 @@ func (sr *mutableRef) traceLogFields() logrus.Fields {
"refID": fmt.Sprintf("%p", sr), "refID": fmt.Sprintf("%p", sr),
"newRefCount": len(sr.refs), "newRefCount": len(sr.refs),
"mutable": true, "mutable": true,
"stack": bklog.LazyStackTrace{}, "stack": bklog.TraceLevelOnlyStack(),
} }
if sr.equalMutable != nil { if sr.equalMutable != nil {
m["equalMutableID"] = sr.equalMutable.ID() m["equalMutableID"] = sr.equalMutable.ID()
@ -733,7 +734,7 @@ func (sr *immutableRef) ociDesc(ctx context.Context, dhs DescHandlers, preferNon
diffID := sr.getDiffID() diffID := sr.getDiffID()
if diffID != "" { if diffID != "" {
desc.Annotations["containerd.io/uncompressed"] = string(diffID) desc.Annotations[labels.LabelUncompressed] = string(diffID)
} }
createdAt := sr.GetCreatedAt() createdAt := sr.GetCreatedAt()
@ -991,6 +992,14 @@ func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Grou
return mnt, nil return mnt, nil
} }
func (sr *immutableRef) ensureLocalContentBlob(ctx context.Context, s session.Group) error {
if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() {
return nil
}
return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, true)
}
func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr error) { func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr error) {
if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() { if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() {
return nil return nil
@ -1001,14 +1010,14 @@ func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr erro
if rerr = sr.prepareRemoteSnapshotsStargzMode(ctx, s); rerr != nil { if rerr = sr.prepareRemoteSnapshotsStargzMode(ctx, s); rerr != nil {
return return
} }
rerr = sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true) rerr = sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, false)
}); err != nil { }); err != nil {
return err return err
} }
return rerr return rerr
} }
return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true) return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, false)
} }
func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, s session.Group, f func()) error { func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, s session.Group, f func()) error {
@ -1053,7 +1062,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context,
} }
func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error { func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error {
_, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ struct{}, rerr error) { _, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ *leaseutil.LeaseRef, rerr error) {
dhs := sr.descHandlers dhs := sr.descHandlers
for _, r := range sr.layerChain() { for _, r := range sr.layerChain() {
r := r r := r
@ -1065,7 +1074,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
dh := dhs[digest.Digest(r.getBlob())] dh := dhs[digest.Digest(r.getBlob())]
if dh == nil { if dh == nil {
// We cannot prepare remote snapshots without descHandler. // We cannot prepare remote snapshots without descHandler.
return struct{}{}, nil return nil, nil
} }
// tmpLabels contains dh.SnapshotLabels + session IDs. All keys contain // tmpLabels contains dh.SnapshotLabels + session IDs. All keys contain
@ -1098,8 +1107,17 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
if err == nil { // usable as remote snapshot without unlazying. if err == nil { // usable as remote snapshot without unlazying.
defer func() { defer func() {
// Remove tmp labels appended in this func // Remove tmp labels appended in this func
for k := range tmpLabels { if info.Labels != nil {
info.Labels[k] = "" for k := range tmpLabels {
info.Labels[k] = ""
}
} else {
// We are logging here to track to try to debug when and why labels are nil.
// Log can be removed when not happening anymore.
bklog.G(ctx).
WithField("snapshotID", snapshotID).
WithField("name", info.Name).
Debug("snapshots exist but labels are nil")
} }
if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil { if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil {
bklog.G(ctx).Warn(errors.Wrapf(err, bklog.G(ctx).Warn(errors.Wrapf(err,
@ -1117,7 +1135,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
break break
} }
return struct{}{}, nil return nil, nil
}) })
return err return err
} }
@ -1139,25 +1157,36 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields
return return
} }
func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error { func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool, ensureContentStore bool) error {
_, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ struct{}, rerr error) { _, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ *leaseutil.LeaseRef, rerr error) {
if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil { if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil {
return struct{}{}, nil if !ensureContentStore {
return nil, nil
}
if blob := sr.getBlob(); blob == "" {
return nil, nil
}
if _, err := sr.cm.ContentStore.Info(ctx, sr.getBlob()); err == nil {
return nil, nil
}
} }
switch sr.kind() { switch sr.kind() {
case Merge, Diff: case Merge, Diff:
return struct{}{}, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel, ensureContentStore)
case Layer, BaseLayer: case Layer, BaseLayer:
return struct{}{}, sr.unlazyLayer(ctx, dhs, pg, s) return nil, sr.unlazyLayer(ctx, dhs, pg, s, ensureContentStore)
} }
return struct{}{}, nil return nil, nil
}) })
return err if err != nil {
return err
}
return nil
} }
// should be called within sizeG.Do call for this ref's ID // should be called within sizeG.Do call for this ref's ID
func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) (rerr error) { func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool, ensureContentStore bool) (rerr error) {
eg, egctx := errgroup.WithContext(ctx) eg, egctx := errgroup.WithContext(ctx)
var diffs []snapshot.Diff var diffs []snapshot.Diff
sr.layerWalk(func(sr *immutableRef) { sr.layerWalk(func(sr *immutableRef) {
@ -1167,13 +1196,13 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
if sr.diffParents.lower != nil { if sr.diffParents.lower != nil {
diff.Lower = sr.diffParents.lower.getSnapshotID() diff.Lower = sr.diffParents.lower.getSnapshotID()
eg.Go(func() error { eg.Go(func() error {
return sr.diffParents.lower.unlazy(egctx, dhs, pg, s, false) return sr.diffParents.lower.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
}) })
} }
if sr.diffParents.upper != nil { if sr.diffParents.upper != nil {
diff.Upper = sr.diffParents.upper.getSnapshotID() diff.Upper = sr.diffParents.upper.getSnapshotID()
eg.Go(func() error { eg.Go(func() error {
return sr.diffParents.upper.unlazy(egctx, dhs, pg, s, false) return sr.diffParents.upper.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
}) })
} }
case Layer: case Layer:
@ -1182,7 +1211,7 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
case BaseLayer: case BaseLayer:
diff.Upper = sr.getSnapshotID() diff.Upper = sr.getSnapshotID()
eg.Go(func() error { eg.Go(func() error {
return sr.unlazy(egctx, dhs, pg, s, false) return sr.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
}) })
} }
diffs = append(diffs, diff) diffs = append(diffs, diff)
@ -1213,7 +1242,7 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
} }
// should be called within sizeG.Do call for this ref's ID // should be called within sizeG.Do call for this ref's ID
func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group) (rerr error) { func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, ensureContentStore bool) (rerr error) {
if !sr.getBlobOnly() { if !sr.getBlobOnly() {
return nil return nil
} }
@ -1240,7 +1269,7 @@ func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg pr
parentID := "" parentID := ""
if sr.layerParent != nil { if sr.layerParent != nil {
eg.Go(func() error { eg.Go(func() error {
if err := sr.layerParent.unlazy(egctx, dhs, pg, s, false); err != nil { if err := sr.layerParent.unlazy(egctx, dhs, pg, s, false, ensureContentStore); err != nil {
return err return err
} }
parentID = sr.layerParent.getSnapshotID() parentID = sr.layerParent.getSnapshotID()

View file

@ -212,7 +212,7 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
} }
} }
if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) { if refCfg.Compression.Force {
if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil { if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil {
return nil, err return nil, err
} else if needs { } else if needs {

View file

@ -11,6 +11,7 @@ import (
v1 "github.com/moby/buildkit/cache/remotecache/v1" v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress"
@ -185,6 +186,11 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
return nil, err return nil, err
} }
if len(config.Layers) == 0 {
bklog.G(ctx).Warn("failed to match any cache with layers")
return nil, progress.OneOff(ctx, "skipping cache export for empty result")(nil)
}
cache, err := NewExportableCache(ce.oci, ce.imageManifest) cache, err := NewExportableCache(ce.oci, ce.imageManifest)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -11,6 +11,7 @@ import (
"time" "time"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/labels"
"github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/cache/remotecache"
v1 "github.com/moby/buildkit/cache/remotecache/v1" v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
@ -133,7 +134,7 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
return nil, errors.Errorf("invalid descriptor without annotations") return nil, errors.Errorf("invalid descriptor without annotations")
} }
var diffID digest.Digest var diffID digest.Digest
v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"] v, ok := dgstPair.Descriptor.Annotations[labels.LabelUncompressed]
if !ok { if !ok {
return nil, errors.Errorf("invalid descriptor without uncompressed annotation") return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
} }
@ -226,7 +227,7 @@ func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorP
if l.Annotations.DiffID == "" { if l.Annotations.DiffID == "" {
return nil, errors.Errorf("cache layer with missing diffid") return nil, errors.Errorf("cache layer with missing diffid")
} }
annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String() annotations[labels.LabelUncompressed] = l.Annotations.DiffID.String()
if !l.Annotations.CreatedAt.IsZero() { if !l.Annotations.CreatedAt.IsZero() {
txt, err := l.Annotations.CreatedAt.MarshalText() txt, err := l.Annotations.CreatedAt.MarshalText()
if err != nil { if err != nil {

View file

@ -10,6 +10,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
v1 "github.com/moby/buildkit/cache/remotecache/v1" v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver"
@ -221,7 +222,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
if createdBy := createdMsg[i]; createdBy != "" { if createdBy := createdMsg[i]; createdBy != "" {
m.Annotations["buildkit/description"] = createdBy m.Annotations["buildkit/description"] = createdBy
} }
m.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String() m.Annotations[labels.LabelUncompressed] = img.Rootfs.DiffIDs[i].String()
layers[m.Digest] = v1.DescriptorProviderPair{ layers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m, Descriptor: m,
Provider: ci.provider, Provider: ci.provider,

View file

@ -1,9 +1,10 @@
package registry package inline
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/containerd/containerd/labels"
"github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/cache/remotecache"
v1 "github.com/moby/buildkit/cache/remotecache/v1" v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
@ -67,7 +68,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
} }
// fallback for uncompressed digests // fallback for uncompressed digests
for _, v := range descs { for _, v := range descs {
if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) { if uc := v.Descriptor.Annotations[labels.LabelUncompressed]; uc == string(k) {
descs2[v.Descriptor.Digest] = v descs2[v.Descriptor.Digest] = v
layerBlobDigests[i] = v.Descriptor.Digest layerBlobDigests[i] = v.Descriptor.Digest
} }

View file

@ -105,8 +105,9 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group,
if sessionID == "" { if sessionID == "" {
return nil, errors.New("local cache exporter/importer requires session") return nil, errors.New("local cache exporter/importer requires session")
} }
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) timeoutCtx, cancel := context.WithCancelCause(context.Background())
defer cancel() timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
caller, err := sm.Get(timeoutCtx, sessionID, false) caller, err := sm.Get(timeoutCtx, sessionID, false)
if err != nil { if err != nil {

View file

@ -7,7 +7,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"

View file

@ -21,21 +21,26 @@ type CacheChains struct {
visited map[interface{}]struct{} visited map[interface{}]struct{}
} }
var _ solver.CacheExporterTarget = &CacheChains{}
func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord { func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
if strings.HasPrefix(dgst.String(), "random:") { if strings.HasPrefix(dgst.String(), "random:") {
// random digests will be different *every* run - so we shouldn't cache
// it, since there's a zero chance this random digest collides again
return &nopRecord{} return &nopRecord{}
} }
it := &item{c: c, dgst: dgst, backlinks: map[*item]struct{}{}}
it := &item{dgst: dgst, backlinks: map[*item]struct{}{}}
c.items = append(c.items, it) c.items = append(c.items, it)
return it return it
} }
func (c *CacheChains) Visit(v interface{}) { func (c *CacheChains) Visit(target any) {
c.visited[v] = struct{}{} c.visited[target] = struct{}{}
} }
func (c *CacheChains) Visited(v interface{}) bool { func (c *CacheChains) Visited(target any) bool {
_, ok := c.visited[v] _, ok := c.visited[target]
return ok return ok
} }
@ -76,6 +81,12 @@ func (c *CacheChains) normalize(ctx context.Context) error {
return nil return nil
} }
// Marshal converts the cache chains structure into a cache config and a
// collection of providers for reading the results from.
//
// Marshal aims to validate, normalize and sort the output to ensure a
// consistent digest (since cache configs are typically uploaded and stored in
// content-addressable OCI registries).
func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) { func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) {
if err := c.normalize(ctx); err != nil { if err := c.normalize(ctx); err != nil {
return nil, nil, err return nil, nil, err
@ -109,19 +120,37 @@ type DescriptorProviderPair struct {
Provider content.Provider Provider content.Provider
} }
// item is an implementation of a record in the cache chain. After validation,
// normalization and marshalling into the cache config, the item results form
// into the "layers", while the digests and the links form into the "records".
type item struct { type item struct {
c *CacheChains // dgst is the unique identifier for each record.
// This *roughly* corresponds to an edge (vertex cachekey + index) in the
// solver - however, a single vertex can produce multiple unique cache keys
// (e.g. fast/slow), so it's a one-to-many relation.
dgst digest.Digest dgst digest.Digest
// links are what connect records to each other (with an optional selector),
// organized by input index (which correspond to vertex inputs).
// We can have multiple links for each index, since *any* of these could be
// used to get to this item (e.g. we could retrieve by fast/slow key).
links []map[link]struct{}
// backlinks are the inverse of a link - these don't actually get directly
// exported, but they're internally used to help efficiently navigate the
// graph.
backlinks map[*item]struct{}
backlinksMu sync.Mutex
// result is the result of computing the edge - this is the target of the
// data we actually want to store in the cache chain.
result *solver.Remote result *solver.Remote
resultTime time.Time resultTime time.Time
links []map[link]struct{} invalid bool
backlinksMu sync.Mutex
backlinks map[*item]struct{}
invalid bool
} }
// link is a pointer to an item, with an optional selector.
type link struct { type link struct {
src *item src *item
selector string selector string
@ -170,25 +199,46 @@ func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector stri
src.backlinksMu.Unlock() src.backlinksMu.Unlock()
} }
// validate checks if an item is valid (i.e. each index has at least one link)
// and marks it as such.
//
// Essentially, if an index has no links, it means that this cache record is
// unreachable by the cache importer, so we should remove it. Once we've marked
// an item as invalid, we remove it from it's backlinks and check it's
// validity again - since now this linked item may be unreachable too.
func (c *item) validate() { func (c *item) validate() {
if c.invalid {
// early exit, if the item is already invalid, we've already gone
// through the backlinks
return
}
for _, m := range c.links { for _, m := range c.links {
// if an index has no links, there's no way to access this record, so
// mark it as invalid
if len(m) == 0 { if len(m) == 0 {
c.invalid = true c.invalid = true
for bl := range c.backlinks { break
changed := false }
for _, m := range bl.links { }
for l := range m {
if l.src == c { if c.invalid {
delete(m, l) for bl := range c.backlinks {
changed = true // remove ourselves from the backlinked item
} changed := false
for _, m := range bl.links {
for l := range m {
if l.src == c {
delete(m, l)
changed = true
} }
} }
if changed {
bl.validate()
}
} }
return
// if we've removed ourselves, we need to check it again
if changed {
bl.validate()
}
} }
} }
} }
@ -211,6 +261,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}
return nil return nil
} }
// nopRecord is used to discard cache results that we're not interested in storing.
type nopRecord struct { type nopRecord struct {
} }
@ -219,5 +270,3 @@ func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, resul
func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
} }
var _ solver.CacheExporterTarget = &CacheChains{}

View file

@ -30,7 +30,6 @@ package cacheimport
// }, // },
// { // {
// "digest": "sha256:deadbeef", // "digest": "sha256:deadbeef",
// "output": 1, <- optional output index
// "layers": [ <- optional array of layer pointers // "layers": [ <- optional array of layer pointers
// { // {
// "createdat": "", // "createdat": "",

View file

@ -90,17 +90,17 @@ type ReadDirRequest struct {
func ReadDir(ctx context.Context, mount snapshot.Mountable, req ReadDirRequest) ([]*fstypes.Stat, error) { func ReadDir(ctx context.Context, mount snapshot.Mountable, req ReadDirRequest) ([]*fstypes.Stat, error) {
var ( var (
rd []*fstypes.Stat rd []*fstypes.Stat
wo fsutil.WalkOpt fo fsutil.FilterOpt
) )
if req.IncludePattern != "" { if req.IncludePattern != "" {
wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern) fo.IncludePatterns = append(fo.IncludePatterns, req.IncludePattern)
} }
err := withMount(ctx, mount, func(root string) error { err := withMount(ctx, mount, func(root string) error {
fp, err := fs.RootPath(root, req.Path) fp, err := fs.RootPath(root, req.Path)
if err != nil { if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} }
return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error { return fsutil.Walk(ctx, fp, &fo, func(path string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
return errors.Wrapf(err, "walking %q", root) return errors.Wrapf(err, "walking %q", root)
} }
@ -128,6 +128,16 @@ func StatFile(ctx context.Context, mount snapshot.Mountable, path string) (*fsty
return errors.WithStack(err) return errors.WithStack(err)
} }
if st, err = fsutil.Stat(fp); err != nil { if st, err = fsutil.Stat(fp); err != nil {
// The filename here is internal to the mount, so we can restore
// the request base path for error reporting.
// See os.DirFS.Open for details.
err1 := err
if err := errors.Cause(err); err != nil {
err1 = err
}
if pe, ok := err1.(*os.PathError); ok {
pe.Path = path
}
return errors.WithStack(err) return errors.WithStack(err)
} }
return nil return nil

View file

@ -84,6 +84,11 @@ func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gate
return g.gateway.ResolveImageConfig(ctx, in, opts...) return g.gateway.ResolveImageConfig(ctx, in, opts...)
} }
func (g *gatewayClientForBuild) ResolveSourceMeta(ctx context.Context, in *gatewayapi.ResolveSourceMetaRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveSourceMetaResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.ResolveSourceMeta(ctx, in, opts...)
}
func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) { func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Solve(ctx, in, opts...) return g.gateway.Solve(ctx, in, opts...)

View file

@ -59,9 +59,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
var creds *withCredentials var creds *withCredentials
for _, o := range opts { for _, o := range opts {
if _, ok := o.(*withFailFast); ok {
gopts = append(gopts, grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok { if credInfo, ok := o.(*withCredentials); ok {
if creds == nil { if creds == nil {
creds = &withCredentials{} creds = &withCredentials{}
@ -105,8 +102,8 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
if tracerProvider != nil { if tracerProvider != nil {
var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}) var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
unary = append(unary, filterInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))) unary = append(unary, filterInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/buildkit/issues/4681
stream = append(stream, otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators))) stream = append(stream, otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators))) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/buildkit/issues/4681
} }
if needDialer { if needDialer {
@ -205,7 +202,7 @@ func (c *Client) Wait(ctx context.Context) error {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return context.Cause(ctx)
case <-time.After(time.Second): case <-time.After(time.Second):
} }
c.conn.ResetConnectBackoff() c.conn.ResetConnectBackoff()
@ -216,14 +213,6 @@ func (c *Client) Close() error {
return c.conn.Close() return c.conn.Close()
} }
type withFailFast struct{}
func (*withFailFast) isClientOpt() {}
func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withDialer struct { type withDialer struct {
dialer func(context.Context, string) (net.Conn, error) dialer func(context.Context, string) (net.Conn, error)
} }

View file

@ -8,49 +8,50 @@ import (
) )
type Vertex struct { type Vertex struct {
Digest digest.Digest Digest digest.Digest `json:"digest,omitempty"`
Inputs []digest.Digest Inputs []digest.Digest `json:"inputs,omitempty"`
Name string Name string `json:"name,omitempty"`
Started *time.Time Started *time.Time `json:"started,omitempty"`
Completed *time.Time Completed *time.Time `json:"completed,omitempty"`
Cached bool Cached bool `json:"cached,omitempty"`
Error string Error string `json:"error,omitempty"`
ProgressGroup *pb.ProgressGroup ProgressGroup *pb.ProgressGroup `json:"progressGroup,omitempty"`
} }
type VertexStatus struct { type VertexStatus struct {
ID string ID string `json:"id"`
Vertex digest.Digest Vertex digest.Digest `json:"vertex,omitempty"`
Name string Name string `json:"name,omitempty"`
Total int64 Total int64 `json:"total,omitempty"`
Current int64 Current int64 `json:"current"`
Timestamp time.Time Timestamp time.Time `json:"timestamp,omitempty"`
Started *time.Time Started *time.Time `json:"started,omitempty"`
Completed *time.Time Completed *time.Time `json:"completed,omitempty"`
} }
type VertexLog struct { type VertexLog struct {
Vertex digest.Digest Vertex digest.Digest `json:"vertex,omitempty"`
Stream int Stream int `json:"stream,omitempty"`
Data []byte Data []byte `json:"data"`
Timestamp time.Time Timestamp time.Time `json:"timestamp"`
} }
type VertexWarning struct { type VertexWarning struct {
Vertex digest.Digest Vertex digest.Digest `json:"vertex,omitempty"`
Level int Level int `json:"level,omitempty"`
Short []byte Short []byte `json:"short,omitempty"`
Detail [][]byte Detail [][]byte `json:"detail,omitempty"`
URL string URL string `json:"url,omitempty"`
SourceInfo *pb.SourceInfo
Range []*pb.Range SourceInfo *pb.SourceInfo `json:"sourceInfo,omitempty"`
Range []*pb.Range `json:"range,omitempty"`
} }
type SolveStatus struct { type SolveStatus struct {
Vertexes []*Vertex Vertexes []*Vertex `json:"vertexes,omitempty"`
Statuses []*VertexStatus Statuses []*VertexStatus `json:"statuses,omitempty"`
Logs []*VertexLog Logs []*VertexLog `json:"logs,omitempty"`
Warnings []*VertexWarning Warnings []*VertexWarning `json:"warnings,omitempty"`
} }
type SolveResponse struct { type SolveResponse struct {

View file

@ -61,7 +61,7 @@ func (as *asyncState) Do(ctx context.Context, c *Constraints) error {
if err != nil { if err != nil {
select { select {
case <-ctx.Done(): case <-ctx.Done():
if errors.Is(err, ctx.Err()) { if errors.Is(err, context.Cause(ctx)) {
return res, err return res, err
} }
default: default:

View file

@ -46,6 +46,7 @@ type mount struct {
tmpfsOpt TmpfsInfo tmpfsOpt TmpfsInfo
cacheSharing CacheMountSharingMode cacheSharing CacheMountSharingMode
noOutput bool noOutput bool
contentCache MountContentCache
} }
type ExecOp struct { type ExecOp struct {
@ -281,6 +282,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
} else if m.source != nil { } else if m.source != nil {
addCap(&e.constraints, pb.CapExecMountBind) addCap(&e.constraints, pb.CapExecMountBind)
} }
if m.contentCache != MountContentCacheDefault {
addCap(&e.constraints, pb.CapExecMountContentCache)
}
} }
if len(e.secrets) > 0 { if len(e.secrets) > 0 {
@ -366,6 +370,14 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED
} }
} }
switch m.contentCache {
case MountContentCacheDefault:
pm.ContentCache = pb.MountContentCache_DEFAULT
case MountContentCacheOn:
pm.ContentCache = pb.MountContentCache_ON
case MountContentCacheOff:
pm.ContentCache = pb.MountContentCache_OFF
}
if m.tmpfs { if m.tmpfs {
pm.MountType = pb.MountType_TMPFS pm.MountType = pb.MountType_TMPFS
pm.TmpfsOpt = &pb.TmpfsOpt{ pm.TmpfsOpt = &pb.TmpfsOpt{
@ -492,6 +504,12 @@ func ForceNoOutput(m *mount) {
m.noOutput = true m.noOutput = true
} }
func ContentCache(cache MountContentCache) MountOption {
return func(m *mount) {
m.contentCache = cache
}
}
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption { func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
return func(m *mount) { return func(m *mount) {
m.cacheID = id m.cacheID = id
@ -783,3 +801,11 @@ const (
UlimitSigpending UlimitName = "sigpending" UlimitSigpending UlimitName = "sigpending"
UlimitStack UlimitName = "stack" UlimitStack UlimitName = "stack"
) )
type MountContentCache int
const (
MountContentCacheDefault MountContentCache = iota
MountContentCacheOn
MountContentCacheOff
)

View file

@ -398,6 +398,18 @@ func WithAllowWildcard(b bool) RmOption {
}) })
} }
type excludeOnCopyAction struct {
patterns []string
}
func (e *excludeOnCopyAction) SetCopyOption(i *CopyInfo) {
i.ExcludePatterns = append(i.ExcludePatterns, e.patterns...)
}
func WithExcludePatterns(patterns []string) CopyOption {
return &excludeOnCopyAction{patterns}
}
type fileActionRm struct { type fileActionRm struct {
file string file string
info RmInfo info RmInfo

View file

@ -9,6 +9,7 @@ import (
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/version" "github.com/moby/buildkit/version"
@ -70,32 +71,31 @@ type imageMetaResolver struct {
} }
type resolveResult struct { type resolveResult struct {
ref string
config []byte config []byte
dgst digest.Digest dgst digest.Digest
} }
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) { func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt) (string, digest.Digest, []byte, error) {
imr.locker.Lock(ref) imr.locker.Lock(ref)
defer imr.locker.Unlock(ref) defer imr.locker.Unlock(ref)
platform := opt.Platform platform := imr.platform
if platform == nil { if opt.Platform != nil {
platform = imr.platform platform = opt.Platform
} }
k := imr.key(ref, platform) k := imr.key(ref, platform)
if res, ok := imr.cache[k]; ok { if res, ok := imr.cache[k]; ok {
return res.ref, res.dgst, res.config, nil return ref, res.dgst, res.config, nil
} }
ref, dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform, opt.SourcePolicies) dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform)
if err != nil { if err != nil {
return "", "", nil, err return "", "", nil, err
} }
imr.cache[k] = resolveResult{dgst: dgst, config: config, ref: ref} imr.cache[k] = resolveResult{dgst: dgst, config: config}
return ref, dgst, config, nil return ref, dgst, config, nil
} }

View file

@ -95,14 +95,18 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
c.Platform = &defaultPlatform c.Platform = &defaultPlatform
} }
opPlatform := pb.Platform{
OS: c.Platform.OS,
Architecture: c.Platform.Architecture,
Variant: c.Platform.Variant,
OSVersion: c.Platform.OSVersion,
}
if c.Platform.OSFeatures != nil {
opPlatform.OSFeatures = append([]string{}, c.Platform.OSFeatures...)
}
return &pb.Op{ return &pb.Op{
Platform: &pb.Platform{ Platform: &opPlatform,
OS: c.Platform.OS,
Architecture: c.Platform.Architecture,
Variant: c.Platform.Variant,
OSVersion: c.Platform.OSVersion,
OSFeatures: c.Platform.OSFeatures,
},
Constraints: &pb.WorkerConstraints{ Constraints: &pb.WorkerConstraints{
Filter: c.WorkerConstraints, Filter: c.WorkerConstraints,
}, },

View file

@ -1,11 +1,7 @@
package llb package llb
import ( import (
"context" "github.com/moby/buildkit/client/llb/sourceresolver"
spb "github.com/moby/buildkit/sourcepolicy/pb"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
) )
// WithMetaResolver adds a metadata resolver to an image // WithMetaResolver adds a metadata resolver to an image
@ -31,30 +27,4 @@ func WithLayerLimit(l int) ImageOption {
} }
// ImageMetaResolver can resolve image config metadata from a reference // ImageMetaResolver can resolve image config metadata from a reference
type ImageMetaResolver interface { type ImageMetaResolver = sourceresolver.ImageMetaResolver
ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error)
}
type ResolverType int
const (
ResolverTypeRegistry ResolverType = iota
ResolverTypeOCILayout
)
type ResolveImageConfigOpt struct {
ResolverType
Platform *ocispecs.Platform
ResolveMode string
LogName string
Store ResolveImageConfigOptStore
SourcePolicies []*spb.Policy
}
type ResolveImageConfigOptStore struct {
SessionID string
StoreID string
}

View file

@ -5,10 +5,12 @@ import (
_ "crypto/sha256" // for opencontainers/go-digest _ "crypto/sha256" // for opencontainers/go-digest
"encoding/json" "encoding/json"
"os" "os"
"path"
"strconv" "strconv"
"strings" "strings"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/gitutil"
@ -135,10 +137,11 @@ func Image(ref string, opts ...ImageOption) State {
if p == nil { if p == nil {
p = c.Platform p = c.Platform
} }
_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ _, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, sourceresolver.Opt{
Platform: p, Platform: p,
ResolveMode: info.resolveMode.String(), ImageOpt: &sourceresolver.ResolveImageOpt{
ResolverType: ResolverTypeRegistry, ResolveMode: info.resolveMode.String(),
},
}) })
if err != nil { if err != nil {
return State{}, err return State{}, err
@ -151,10 +154,11 @@ func Image(ref string, opts ...ImageOption) State {
if p == nil { if p == nil {
p = c.Platform p = c.Platform
} }
ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, sourceresolver.Opt{
Platform: p, Platform: p,
ResolveMode: info.resolveMode.String(), ImageOpt: &sourceresolver.ResolveImageOpt{
ResolverType: ResolverTypeRegistry, ResolveMode: info.resolveMode.String(),
},
}) })
if err != nil { if err != nil {
return State{}, err return State{}, err
@ -226,7 +230,7 @@ type ImageInfo struct {
// Git returns a state that represents a git repository. // Git returns a state that represents a git repository.
// Example: // Example:
// //
// st := llb.Git("https://github.com/moby/buildkit.git#v0.11.6") // st := llb.Git("https://github.com/moby/buildkit.git", "v0.11.6")
// //
// The example fetches the v0.11.6 tag of the buildkit repository. // The example fetches the v0.11.6 tag of the buildkit repository.
// You can also use a commit hash or a branch name. // You can also use a commit hash or a branch name.
@ -237,29 +241,29 @@ type ImageInfo struct {
// //
// By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded. // By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded.
// Additionally the ".git" directory is removed after the clone, you can keep ith with the [KeepGitDir] [GitOption]. // Additionally the ".git" directory is removed after the clone, you can keep ith with the [KeepGitDir] [GitOption].
func Git(remote, ref string, opts ...GitOption) State { func Git(url, ref string, opts ...GitOption) State {
url := strings.Split(remote, "#")[0] remote, err := gitutil.ParseURL(url)
if errors.Is(err, gitutil.ErrUnknownProtocol) {
var protocolType int
remote, protocolType = gitutil.ParseProtocol(remote)
var sshHost string
if protocolType == gitutil.SSHProtocol {
parts := strings.SplitN(remote, ":", 2)
if len(parts) == 2 {
sshHost = parts[0]
// keep remote consistent with http(s) version
remote = parts[0] + "/" + parts[1]
}
}
if protocolType == gitutil.UnknownProtocol {
url = "https://" + url url = "https://" + url
remote, err = gitutil.ParseURL(url)
}
if remote != nil {
url = remote.Remote
} }
id := remote var id string
if err != nil {
if ref != "" { // If we can't parse the URL, just use the full URL as the ID. The git
id += "#" + ref // operation will fail later on.
id = url
} else {
// We construct the ID manually here, so that we can create the same ID
// for different protocols (e.g. https and ssh) that have the same
// host/path/fragment combination.
id = remote.Host + path.Join("/", remote.Path)
if ref != "" {
id += "#" + ref
}
} }
gi := &GitInfo{ gi := &GitInfo{
@ -290,11 +294,11 @@ func Git(remote, ref string, opts ...GitOption) State {
addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth) addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth)
} }
} }
if protocolType == gitutil.SSHProtocol { if remote != nil && remote.Scheme == gitutil.SSHProtocol {
if gi.KnownSSHHosts != "" { if gi.KnownSSHHosts != "" {
attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts
} else if sshHost != "" { } else {
keyscan, err := sshutil.SSHKeyScan(sshHost) keyscan, err := sshutil.SSHKeyScan(remote.Host)
if err == nil { if err == nil {
// best effort // best effort
attrs[pb.AttrKnownSSHHosts] = keyscan attrs[pb.AttrKnownSSHHosts] = keyscan

View file

@ -0,0 +1,59 @@
package sourceresolver
import (
"context"
"strings"
"github.com/distribution/reference"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/imageutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error)
}
type imageMetaResolver struct {
mr MetaResolver
}
var _ ImageMetaResolver = &imageMetaResolver{}
func NewImageMetaResolver(mr MetaResolver) ImageMetaResolver {
return &imageMetaResolver{
mr: mr,
}
}
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error) {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return "", "", nil, errors.Wrapf(err, "could not parse reference %q", ref)
}
ref = parsed.String()
op := &pb.SourceOp{
Identifier: "docker-image://" + ref,
}
if opt := opt.OCILayoutOpt; opt != nil {
op.Identifier = "oci-layout://" + ref
op.Attrs = map[string]string{}
if opt.Store.SessionID != "" {
op.Attrs[pb.AttrOCILayoutSessionID] = opt.Store.SessionID
}
if opt.Store.StoreID != "" {
op.Attrs[pb.AttrOCILayoutStoreID] = opt.Store.StoreID
}
}
res, err := imr.mr.ResolveSourceMetadata(ctx, op, opt)
if err != nil {
return "", "", nil, errors.Wrapf(err, "failed to resolve source metadata for %s", ref)
}
if res.Image == nil {
return "", "", nil, &imageutil.ResolveToNonImageError{Ref: ref, Updated: res.Op.Identifier}
}
ref = strings.TrimPrefix(res.Op.Identifier, "docker-image://")
ref = strings.TrimPrefix(ref, "oci-layout://")
return ref, res.Image.Digest, res.Image.Config, nil
}

View file

@ -0,0 +1,54 @@
package sourceresolver
import (
"context"
"github.com/moby/buildkit/solver/pb"
spb "github.com/moby/buildkit/sourcepolicy/pb"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
type ResolverType int
const (
ResolverTypeRegistry ResolverType = iota
ResolverTypeOCILayout
)
type MetaResolver interface {
ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt Opt) (*MetaResponse, error)
}
type Opt struct {
LogName string
SourcePolicies []*spb.Policy
Platform *ocispecs.Platform
ImageOpt *ResolveImageOpt
OCILayoutOpt *ResolveOCILayoutOpt
}
type MetaResponse struct {
Op *pb.SourceOp
Image *ResolveImageResponse
}
type ResolveImageOpt struct {
ResolveMode string
}
type ResolveImageResponse struct {
Digest digest.Digest
Config []byte
}
type ResolveOCILayoutOpt struct {
Store ResolveImageConfigOptStore
}
type ResolveImageConfigOptStore struct {
SessionID string
StoreID string
}

View file

@ -229,7 +229,7 @@ func (s State) Output() Output {
return s.out return s.out
} }
// WithOutput creats a new state with the output set to the given output. // WithOutput creates a new state with the output set to the given output.
func (s State) WithOutput(o Output) State { func (s State) WithOutput(o Output) State {
prev := s prev := s
s = State{ s = State{
@ -258,16 +258,21 @@ func (s State) WithImageConfig(c []byte) (State, error) {
} }
s = s.Dir(img.Config.WorkingDir) s = s.Dir(img.Config.WorkingDir)
if img.Architecture != "" && img.OS != "" { if img.Architecture != "" && img.OS != "" {
s = s.Platform(ocispecs.Platform{ plat := ocispecs.Platform{
OS: img.OS, OS: img.OS,
Architecture: img.Architecture, Architecture: img.Architecture,
Variant: img.Variant, Variant: img.Variant,
}) OSVersion: img.OSVersion,
}
if img.OSFeatures != nil {
plat.OSFeatures = append([]string{}, img.OSFeatures...)
}
s = s.Platform(plat)
} }
return s, nil return s, nil
} }
// Run performs the command specified by the arguments within the contexst of the current [State]. // Run performs the command specified by the arguments within the context of the current [State].
// The command is executed as a container with the [State]'s filesystem as the root filesystem. // The command is executed as a container with the [State]'s filesystem as the root filesystem.
// As such any command you run must be present in the [State]'s filesystem. // As such any command you run must be present in the [State]'s filesystem.
// Constraints such as [State.Ulimit], [State.ParentCgroup], [State.Network], etc. are applied to the container. // Constraints such as [State.Ulimit], [State.ParentCgroup], [State.Network], etc. are applied to the container.

View file

@ -12,9 +12,6 @@ import (
) )
const ( const (
// indexFile is the name of the index file
indexFile = "index.json"
// lockFileSuffix is the suffix of the lock file // lockFileSuffix is the suffix of the lock file
lockFileSuffix = ".lock" lockFileSuffix = ".lock"
) )
@ -26,7 +23,7 @@ type StoreIndex struct {
} }
func NewStoreIndex(storePath string) StoreIndex { func NewStoreIndex(storePath string) StoreIndex {
indexPath := path.Join(storePath, indexFile) indexPath := path.Join(storePath, ocispecs.ImageIndexFile)
layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile) layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile)
return StoreIndex{ return StoreIndex{
indexPath: indexPath, indexPath: indexPath,

View file

@ -35,7 +35,8 @@ import (
type SolveOpt struct { type SolveOpt struct {
Exports []ExportEntry Exports []ExportEntry
LocalDirs map[string]string LocalDirs map[string]string // Deprecated: use LocalMounts
LocalMounts map[string]fsutil.FS
OCIStores map[string]content.Store OCIStores map[string]content.Store
SharedKey string SharedKey string
Frontend string Frontend string
@ -55,8 +56,8 @@ type SolveOpt struct {
type ExportEntry struct { type ExportEntry struct {
Type string Type string
Attrs map[string]string Attrs map[string]string
Output func(map[string]string) (io.WriteCloser, error) // for ExporterOCI and ExporterDocker Output filesync.FileOutputFunc // for ExporterOCI and ExporterDocker
OutputDir string // for ExporterLocal OutputDir string // for ExporterLocal
} }
type CacheOptionsEntry struct { type CacheOptionsEntry struct {
@ -90,7 +91,11 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
return nil, errors.New("invalid with def and cb") return nil, errors.New("invalid with def and cb")
} }
syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) mounts, err := prepareMounts(&opt)
if err != nil {
return nil, err
}
syncedDirs, err := prepareSyncedFiles(def, mounts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -101,8 +106,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
} }
eg, ctx := errgroup.WithContext(ctx) eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background()) statusContext, cancelStatus := context.WithCancelCause(context.Background())
defer cancelStatus() defer cancelStatus(errors.WithStack(context.Canceled))
if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
statusContext = trace.ContextWithSpan(statusContext, span) statusContext = trace.ContextWithSpan(statusContext, span)
@ -125,14 +130,6 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
return nil, err return nil, err
} }
var ex ExportEntry
if len(opt.Exports) > 1 {
return nil, errors.New("currently only single Exports can be specified")
}
if len(opt.Exports) == 1 {
ex = opt.Exports[0]
}
storesToUpdate := []string{} storesToUpdate := []string{}
if !opt.SessionPreInitialized { if !opt.SessionPreInitialized {
@ -156,51 +153,52 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
contentStores[key2] = store contentStores[key2] = store
} }
var supportFile bool var syncTargets []filesync.FSSyncTarget
var supportDir bool for exID, ex := range opt.Exports {
switch ex.Type { var supportFile bool
case ExporterLocal: var supportDir bool
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker:
supportDir = ex.OutputDir != ""
supportFile = ex.Output != nil
}
if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}
if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
}
if supportDir {
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type { switch ex.Type {
case ExporterLocal:
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker: case ExporterOCI, ExporterDocker:
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil { supportDir = ex.OutputDir != ""
return nil, err supportFile = ex.Output != nil
}
if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}
if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
} }
cs, err := contentlocal.NewStore(ex.OutputDir) syncTargets = append(syncTargets, filesync.WithFSSync(exID, ex.Output))
if err != nil { }
return nil, err if supportDir {
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type {
case ExporterOCI, ExporterDocker:
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(ex.OutputDir)
if err != nil {
return nil, err
}
contentStores["export"] = cs
storesToUpdate = append(storesToUpdate, ex.OutputDir)
default:
syncTargets = append(syncTargets, filesync.WithFSSyncDir(exID, ex.OutputDir))
} }
contentStores["export"] = cs
storesToUpdate = append(storesToUpdate, ex.OutputDir)
default:
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
} }
} }
@ -208,6 +206,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(sessioncontent.NewAttachable(contentStores)) s.Allow(sessioncontent.NewAttachable(contentStores))
} }
if len(syncTargets) > 0 {
s.Allow(filesync.NewFSSyncTarget(syncTargets...))
}
eg.Go(func() error { eg.Go(func() error {
sd := c.sessionDialer sd := c.sessionDialer
if sd == nil { if sd == nil {
@ -225,16 +227,16 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
frontendAttrs[k] = v frontendAttrs[k] = v
} }
solveCtx, cancelSolve := context.WithCancel(ctx) solveCtx, cancelSolve := context.WithCancelCause(ctx)
var res *SolveResponse var res *SolveResponse
eg.Go(func() error { eg.Go(func() error {
ctx := solveCtx ctx := solveCtx
defer cancelSolve() defer cancelSolve(errors.WithStack(context.Canceled))
defer func() { // make sure the Status ends cleanly on build errors defer func() { // make sure the Status ends cleanly on build errors
go func() { go func() {
<-time.After(3 * time.Second) <-time.After(3 * time.Second)
cancelStatus() cancelStatus(errors.WithStack(context.Canceled))
}() }()
if !opt.SessionPreInitialized { if !opt.SessionPreInitialized {
bklog.G(ctx).Debugf("stopping session") bklog.G(ctx).Debugf("stopping session")
@ -255,19 +257,34 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
frontendInputs[key] = def.ToPB() frontendInputs[key] = def.ToPB()
} }
exports := make([]*controlapi.Exporter, 0, len(opt.Exports))
exportDeprecated := ""
exportAttrDeprecated := map[string]string{}
for i, exp := range opt.Exports {
if i == 0 {
exportDeprecated = exp.Type
exportAttrDeprecated = exp.Attrs
}
exports = append(exports, &controlapi.Exporter{
Type: exp.Type,
Attrs: exp.Attrs,
})
}
resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{ resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref, Ref: ref,
Definition: pbd, Definition: pbd,
Exporter: ex.Type, Exporters: exports,
ExporterAttrs: ex.Attrs, ExporterDeprecated: exportDeprecated,
Session: s.ID(), ExporterAttrsDeprecated: exportAttrDeprecated,
Frontend: opt.Frontend, Session: s.ID(),
FrontendAttrs: frontendAttrs, Frontend: opt.Frontend,
FrontendInputs: frontendInputs, FrontendAttrs: frontendAttrs,
Cache: cacheOpt.options, FrontendInputs: frontendInputs,
Entitlements: opt.AllowedEntitlements, Cache: cacheOpt.options,
Internal: opt.Internal, Entitlements: opt.AllowedEntitlements,
SourcePolicy: opt.SourcePolicy, Internal: opt.Internal,
SourcePolicy: opt.SourcePolicy,
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to solve") return errors.Wrap(err, "failed to solve")
@ -293,7 +310,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
select { select {
case <-solveCtx.Done(): case <-solveCtx.Done():
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
cancelSolve() cancelSolve(errors.WithStack(context.Canceled))
} }
return err return err
@ -361,26 +378,23 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
return res, nil return res, nil
} }
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) { func prepareSyncedFiles(def *llb.Definition, localMounts map[string]fsutil.FS) (filesync.StaticDirSource, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
return nil, errors.Wrapf(err, "could not find %s", d)
}
if !fi.IsDir() {
return nil, errors.Errorf("%s not a directory", d)
}
}
resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult { resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
st.Uid = 0 st.Uid = 0
st.Gid = 0 st.Gid = 0
return fsutil.MapResultKeep return fsutil.MapResultKeep
} }
dirs := make(filesync.StaticDirSource, len(localDirs)) result := make(filesync.StaticDirSource, len(localMounts))
if def == nil { if def == nil {
for name, d := range localDirs { for name, mount := range localMounts {
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} mount, err := fsutil.NewFilterFS(mount, &fsutil.FilterOpt{
Map: resetUIDAndGID,
})
if err != nil {
return nil, err
}
result[name] = mount
} }
} else { } else {
for _, dt := range def.Def { for _, dt := range def.Def {
@ -391,16 +405,22 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesy
if src := op.GetSource(); src != nil { if src := op.GetSource(); src != nil {
if strings.HasPrefix(src.Identifier, "local://") { if strings.HasPrefix(src.Identifier, "local://") {
name := strings.TrimPrefix(src.Identifier, "local://") name := strings.TrimPrefix(src.Identifier, "local://")
d, ok := localDirs[name] mount, ok := localMounts[name]
if !ok { if !ok {
return nil, errors.Errorf("local directory %s not enabled", name) return nil, errors.Errorf("local directory %s not enabled", name)
} }
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} mount, err := fsutil.NewFilterFS(mount, &fsutil.FilterOpt{
Map: resetUIDAndGID,
})
if err != nil {
return nil, err
}
result[name] = mount
} }
} }
} }
} }
return dirs, nil return result, nil
} }
func defaultSessionName() string { func defaultSessionName() string {
@ -523,3 +543,22 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
} }
return &res, nil return &res, nil
} }
func prepareMounts(opt *SolveOpt) (map[string]fsutil.FS, error) {
// merge local mounts and fallback local directories together
mounts := make(map[string]fsutil.FS)
for k, mount := range opt.LocalMounts {
mounts[k] = mount
}
for k, dir := range opt.LocalDirs {
mount, err := fsutil.NewFS(dir)
if err != nil {
return nil, err
}
if _, ok := mounts[k]; ok {
return nil, errors.Errorf("local mount %s already exists", k)
}
mounts[k] = mount
}
return mounts, nil
}

View file

@ -14,9 +14,15 @@ type Config struct {
// Entitlements e.g. security.insecure, network.host // Entitlements e.g. security.insecure, network.host
Entitlements []string `toml:"insecure-entitlements"` Entitlements []string `toml:"insecure-entitlements"`
// LogFormat is the format of the logs. It can be "json" or "text".
Log LogConfig `toml:"log"`
// GRPC configuration settings // GRPC configuration settings
GRPC GRPCConfig `toml:"grpc"` GRPC GRPCConfig `toml:"grpc"`
OTEL OTELConfig `toml:"otel"`
Workers struct { Workers struct {
OCI OCIConfig `toml:"oci"` OCI OCIConfig `toml:"oci"`
Containerd ContainerdConfig `toml:"containerd"` Containerd ContainerdConfig `toml:"containerd"`
@ -29,6 +35,10 @@ type Config struct {
History *HistoryConfig `toml:"history"` History *HistoryConfig `toml:"history"`
} }
type LogConfig struct {
Format string `toml:"format"`
}
type GRPCConfig struct { type GRPCConfig struct {
Address []string `toml:"address"` Address []string `toml:"address"`
DebugAddress string `toml:"debugAddress"` DebugAddress string `toml:"debugAddress"`
@ -46,6 +56,10 @@ type TLSConfig struct {
CA string `toml:"ca"` CA string `toml:"ca"`
} }
type OTELConfig struct {
SocketPath string `toml:"socketPath"`
}
type GCConfig struct { type GCConfig struct {
GC *bool `toml:"gc"` GC *bool `toml:"gc"`
GCKeepStorage DiskSpace `toml:"gckeepstorage"` GCKeepStorage DiskSpace `toml:"gckeepstorage"`
@ -57,6 +71,8 @@ type NetworkConfig struct {
CNIConfigPath string `toml:"cniConfigPath"` CNIConfigPath string `toml:"cniConfigPath"`
CNIBinaryPath string `toml:"cniBinaryPath"` CNIBinaryPath string `toml:"cniBinaryPath"`
CNIPoolSize int `toml:"cniPoolSize"` CNIPoolSize int `toml:"cniPoolSize"`
BridgeName string `toml:"bridgeName"`
BridgeSubnet string `toml:"bridgeSubnet"`
} }
type OCIConfig struct { type OCIConfig struct {
@ -98,6 +114,7 @@ type ContainerdConfig struct {
Labels map[string]string `toml:"labels"` Labels map[string]string `toml:"labels"`
Platforms []string `toml:"platforms"` Platforms []string `toml:"platforms"`
Namespace string `toml:"namespace"` Namespace string `toml:"namespace"`
Runtime ContainerdRuntime `toml:"runtime"`
GCConfig GCConfig
NetworkConfig NetworkConfig
Snapshotter string `toml:"snapshotter"` Snapshotter string `toml:"snapshotter"`
@ -114,6 +131,12 @@ type ContainerdConfig struct {
Rootless bool `toml:"rootless"` Rootless bool `toml:"rootless"`
} }
type ContainerdRuntime struct {
Name string `toml:"name"`
Path string `toml:"path"`
Options map[string]interface{} `toml:"options"`
}
type GCPolicy struct { type GCPolicy struct {
All bool `toml:"all"` All bool `toml:"all"`
KeepBytes DiskSpace `toml:"keepBytes"` KeepBytes DiskSpace `toml:"keepBytes"`

View file

@ -11,7 +11,7 @@ import (
contentapi "github.com/containerd/containerd/api/services/content/v1" contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/services/content/contentserver" "github.com/containerd/containerd/services/content/contentserver"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
"github.com/mitchellh/hashstructure/v2" "github.com/mitchellh/hashstructure/v2"
controlapi "github.com/moby/buildkit/api/services/control" controlapi "github.com/moby/buildkit/api/services/control"
@ -130,11 +130,12 @@ func (c *Controller) Close() error {
if err := c.opt.WorkerController.Close(); err != nil { if err := c.opt.WorkerController.Close(); err != nil {
rerr = multierror.Append(rerr, err) rerr = multierror.Append(rerr, err)
} }
if err := c.opt.CacheStore.Close(); err != nil { if err := c.opt.CacheStore.Close(); err != nil {
rerr = multierror.Append(rerr, err) rerr = multierror.Append(rerr, err)
} }
if err := c.solver.Close(); err != nil {
rerr = multierror.Append(rerr, err)
}
return rerr return rerr
} }
@ -313,6 +314,7 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) {
req.Cache.ExportRefDeprecated = "" req.Cache.ExportRefDeprecated = ""
req.Cache.ExportAttrsDeprecated = nil req.Cache.ExportAttrsDeprecated = nil
} }
// translates ImportRefs to new Imports (v0.4.0) // translates ImportRefs to new Imports (v0.4.0)
for _, legacyImportRef := range req.Cache.ImportRefsDeprecated { for _, legacyImportRef := range req.Cache.ImportRefsDeprecated {
im := &controlapi.CacheOptionsEntry{ im := &controlapi.CacheOptionsEntry{
@ -323,6 +325,16 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) {
req.Cache.Imports = append(req.Cache.Imports, im) req.Cache.Imports = append(req.Cache.Imports, im)
} }
req.Cache.ImportRefsDeprecated = nil req.Cache.ImportRefsDeprecated = nil
// translate single exporter to a slice (v0.13.0)
if len(req.Exporters) == 0 && req.ExporterDeprecated != "" {
req.Exporters = append(req.Exporters, &controlapi.Exporter{
Type: req.ExporterDeprecated,
Attrs: req.ExporterAttrsDeprecated,
})
req.ExporterDeprecated = ""
req.ExporterAttrsDeprecated = nil
}
} }
func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
@ -335,7 +347,6 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
time.AfterFunc(time.Second, c.throttledGC) time.AfterFunc(time.Second, c.throttledGC)
}() }()
var expi exporter.ExporterInstance
// TODO: multiworker // TODO: multiworker
// This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this.
w, err := c.opt.WorkerController.GetDefault() w, err := c.opt.WorkerController.GetDefault()
@ -343,25 +354,29 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
return nil, err return nil, err
} }
// if SOURCE_DATE_EPOCH is set, enable it for the exporter // if SOURCE_DATE_EPOCH is set, enable it for the exporters
if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok { if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok {
if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok { for _, ex := range req.Exporters {
if req.ExporterAttrs == nil { if _, ok := ex.Attrs[string(exptypes.OptKeySourceDateEpoch)]; !ok {
req.ExporterAttrs = make(map[string]string) if ex.Attrs == nil {
ex.Attrs = make(map[string]string)
}
ex.Attrs[string(exptypes.OptKeySourceDateEpoch)] = v
} }
req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)] = v
} }
} }
if req.Exporter != "" { var expis []exporter.ExporterInstance
exp, err := w.Exporter(req.Exporter, c.opt.SessionManager) for i, ex := range req.Exporters {
exp, err := w.Exporter(ex.Type, c.opt.SessionManager)
if err != nil { if err != nil {
return nil, err return nil, err
} }
expi, err = exp.Resolve(ctx, req.ExporterAttrs) expi, err := exp.Resolve(ctx, i, ex.Attrs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
expis = append(expis, expi)
} }
if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil { if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil {
@ -456,10 +471,8 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
FrontendInputs: req.FrontendInputs, FrontendInputs: req.FrontendInputs,
CacheImports: cacheImports, CacheImports: cacheImports,
}, llbsolver.ExporterRequest{ }, llbsolver.ExporterRequest{
Exporter: expi, Exporters: expis,
CacheExporters: cacheExporters, CacheExporters: cacheExporters,
Type: req.Exporter,
Attrs: req.ExporterAttrs,
}, req.Entitlements, procs, req.Internal, req.SourcePolicy) }, req.Entitlements, procs, req.Internal, req.SourcePolicy)
if err != nil { if err != nil {
return nil, err return nil, err
@ -508,10 +521,10 @@ func (c *Controller) Session(stream controlapi.Control_SessionServer) error {
conn, closeCh, opts := grpchijack.Hijack(stream) conn, closeCh, opts := grpchijack.Hijack(stream)
defer conn.Close() defer conn.Close()
ctx, cancel := context.WithCancel(stream.Context()) ctx, cancel := context.WithCancelCause(stream.Context())
go func() { go func() {
<-closeCh <-closeCh
cancel() cancel(errors.WithStack(context.Canceled))
}() }()
err := c.opt.SessionManager.HandleConn(ctx, conn, opts) err := c.opt.SessionManager.HandleConn(ctx, conn, opts)

View file

@ -8,6 +8,7 @@ import (
"github.com/moby/buildkit/client/buildid" "github.com/moby/buildkit/client/buildid"
"github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/frontend/gateway"
gwapi "github.com/moby/buildkit/frontend/gateway/pb" gwapi "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/errdefs"
"github.com/pkg/errors" "github.com/pkg/errors"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -58,8 +59,9 @@ func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBr
return nil, errors.New("no buildid found in context") return nil, errors.New("no buildid found in context")
} }
ctx, cancel := context.WithTimeout(ctx, 3*time.Second) ctx, cancel := context.WithCancelCause(ctx)
defer cancel() ctx, _ = context.WithTimeoutCause(ctx, 3*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
go func() { go func() {
<-ctx.Done() <-ctx.Done()
@ -73,7 +75,7 @@ func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBr
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, errors.Errorf("no such job %s", bid) return nil, errdefs.NewUnknownJobError(bid)
default: default:
} }
fwd, ok := gwf.builds[bid] fwd, ok := gwf.builds[bid]
@ -94,6 +96,15 @@ func (gwf *GatewayForwarder) ResolveImageConfig(ctx context.Context, req *gwapi.
return fwd.ResolveImageConfig(ctx, req) return fwd.ResolveImageConfig(ctx, req)
} }
func (gwf *GatewayForwarder) ResolveSourceMeta(ctx context.Context, req *gwapi.ResolveSourceMetaRequest) (*gwapi.ResolveSourceMetaResponse, error) {
fwd, err := gwf.lookupForwarder(ctx)
if err != nil {
return nil, errors.Wrap(err, "forwarding ResolveSourceMeta")
}
return fwd.ResolveSourceMeta(ctx, req)
}
func (gwf *GatewayForwarder) Solve(ctx context.Context, req *gwapi.SolveRequest) (*gwapi.SolveResponse, error) { func (gwf *GatewayForwarder) Solve(ctx context.Context, req *gwapi.SolveRequest) (*gwapi.SolveResponse, error) {
fwd, err := gwf.lookupForwarder(ctx) fwd, err := gwf.lookupForwarder(ctx)
if err != nil { if err != nil {

View file

@ -16,19 +16,13 @@ import (
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/cio" "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci" "github.com/moby/buildkit/executor/oci"
resourcestypes "github.com/moby/buildkit/executor/resources/types" resourcestypes "github.com/moby/buildkit/executor/resources/types"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity" "github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/network"
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -38,12 +32,13 @@ type containerdExecutor struct {
networkProviders map[pb.NetMode]network.Provider networkProviders map[pb.NetMode]network.Provider
cgroupParent string cgroupParent string
dnsConfig *oci.DNSConfig dnsConfig *oci.DNSConfig
running map[string]chan error running map[string]*containerState
mu sync.Mutex mu sync.Mutex
apparmorProfile string apparmorProfile string
selinux bool selinux bool
traceSocket string traceSocket string
rootless bool rootless bool
runtime *RuntimeInfo
} }
// OnCreateRuntimer provides an alternative to OCI hooks for applying network // OnCreateRuntimer provides an alternative to OCI hooks for applying network
@ -59,8 +54,14 @@ type OnCreateRuntimer interface {
OnCreateRuntime(pid uint32) error OnCreateRuntime(pid uint32) error
} }
type RuntimeInfo struct {
Name string
Path string
Options any
}
// New creates a new executor backed by connection to containerd API // New creates a new executor backed by connection to containerd API
func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool) executor.Executor { func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool, runtime *RuntimeInfo) executor.Executor {
// clean up old hosts/resolv.conf file. ignore errors // clean up old hosts/resolv.conf file. ignore errors
os.RemoveAll(filepath.Join(root, "hosts")) os.RemoveAll(filepath.Join(root, "hosts"))
os.RemoveAll(filepath.Join(root, "resolv.conf")) os.RemoveAll(filepath.Join(root, "resolv.conf"))
@ -71,14 +72,25 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb
networkProviders: networkProviders, networkProviders: networkProviders,
cgroupParent: cgroup, cgroupParent: cgroup,
dnsConfig: dnsConfig, dnsConfig: dnsConfig,
running: make(map[string]chan error), running: make(map[string]*containerState),
apparmorProfile: apparmorProfile, apparmorProfile: apparmorProfile,
selinux: selinux, selinux: selinux,
traceSocket: traceSocket, traceSocket: traceSocket,
rootless: rootless, rootless: rootless,
runtime: runtime,
} }
} }
type containerState struct {
done chan error
// On linux the rootfsPath is used to ensure the CWD exists, to fetch user information
// and as a bind mount for the root FS of the container.
rootfsPath string
// On Windows we need to use the root mounts to achieve the same thing that Linux does
// with rootfsPath. So we save both in details.
rootMounts []mount.Mount
}
func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
if id == "" { if id == "" {
id = identity.NewID() id = identity.NewID()
@ -86,8 +98,11 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
startedOnce := sync.Once{} startedOnce := sync.Once{}
done := make(chan error, 1) done := make(chan error, 1)
details := &containerState{
done: done,
}
w.mu.Lock() w.mu.Lock()
w.running[id] = done w.running[id] = details
w.mu.Unlock() w.mu.Unlock()
defer func() { defer func() {
w.mu.Lock() w.mu.Lock()
@ -103,96 +118,49 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
}() }()
meta := process.Meta meta := process.Meta
if meta.NetMode == pb.NetMode_HOST {
resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig) bklog.G(ctx).Info("enabling HostNetworking")
if err != nil {
return nil, err
}
hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
if err != nil {
return nil, err
}
if clean != nil {
defer clean()
}
mountable, err := root.Src.Mount(ctx, false)
if err != nil {
return nil, err
}
rootMounts, release, err := mountable.Mount()
if err != nil {
return nil, err
}
if release != nil {
defer release()
}
lm := snapshot.LocalMounterWithMounts(rootMounts)
rootfsPath, err := lm.Mount()
if err != nil {
return nil, err
}
defer lm.Unmount()
defer executor.MountStubsCleaner(ctx, rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
if err != nil {
return nil, err
}
identity := idtools.Identity{
UID: int(uid),
GID: int(gid),
}
newp, err := fs.RootPath(rootfsPath, meta.Cwd)
if err != nil {
return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp)
}
if _, err := os.Stat(newp); err != nil {
if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
return nil, errors.Wrapf(err, "failed to create working directory %s", newp)
}
} }
provider, ok := w.networkProviders[meta.NetMode] provider, ok := w.networkProviders[meta.NetMode]
if !ok { if !ok {
return nil, errors.Errorf("unknown network mode %s", meta.NetMode) return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
} }
resolvConf, hostsFile, releasers, err := w.prepareExecutionEnv(ctx, root, mounts, meta, details, meta.NetMode)
if err != nil {
return nil, err
}
if releasers != nil {
defer releasers()
}
if err := w.ensureCWD(ctx, details, meta); err != nil {
return nil, err
}
namespace, err := provider.New(ctx, meta.Hostname) namespace, err := provider.New(ctx, meta.Hostname)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer namespace.Close() defer namespace.Close()
if meta.NetMode == pb.NetMode_HOST { spec, releaseSpec, err := w.createOCISpec(ctx, id, resolvConf, hostsFile, namespace, mounts, meta, details)
bklog.G(ctx).Info("enabling HostNetworking")
}
opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
if meta.ReadonlyRootFS {
opts = append(opts, containerdoci.WithRootFSReadonly())
}
processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer cleanup() if releaseSpec != nil {
spec.Process.Terminal = meta.Tty defer releaseSpec()
if w.rootless {
if err := rootlessspecconv.ToRootless(spec); err != nil {
return nil, err
}
} }
container, err := w.client.NewContainer(ctx, id, opts := []containerd.NewContainerOpts{
containerd.WithSpec(spec), containerd.WithSpec(spec),
) }
if w.runtime != nil {
opts = append(opts, containerd.WithRuntime(w.runtime.Name, w.runtime.Options))
}
container, err := w.client.NewContainer(ctx, id, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -209,11 +177,14 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
cioOpts = append(cioOpts, cio.WithTerminal) cioOpts = append(cioOpts, cio.WithTerminal)
} }
task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), containerd.WithRootFS([]mount.Mount{{ taskOpts, err := details.getTaskOpts()
Source: rootfsPath, if err != nil {
Type: "bind", return nil, err
Options: []string{"rbind"}, }
}})) if w.runtime != nil && w.runtime.Path != "" {
taskOpts = append(taskOpts, containerd.WithRuntimePath(w.runtime.Path))
}
task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), taskOpts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -249,17 +220,16 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
// is in the process of being created and check again every 100ms or until // is in the process of being created and check again every 100ms or until
// context is canceled. // context is canceled.
w.mu.Lock()
details, ok := w.running[id]
w.mu.Unlock()
if !ok {
return errors.Errorf("container %s not found", id)
}
var container containerd.Container var container containerd.Container
var task containerd.Task var task containerd.Task
for { for {
w.mu.Lock()
done, ok := w.running[id]
w.mu.Unlock()
if !ok {
return errors.Errorf("container %s not found", id)
}
if container == nil { if container == nil {
container, _ = w.client.LoadContainer(ctx, id) container, _ = w.client.LoadContainer(ctx, id)
} }
@ -274,8 +244,8 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return context.Cause(ctx)
case err, ok := <-done: case err, ok := <-details.done:
if !ok || err == nil { if !ok || err == nil {
return errors.Errorf("container %s has stopped", id) return errors.Errorf("container %s has stopped", id)
} }
@ -291,23 +261,20 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
} }
proc := spec.Process proc := spec.Process
// TODO how do we get rootfsPath for oci.GetUser in case user passed in username rather than uid:gid?
// For now only support uid:gid
if meta.User != "" { if meta.User != "" {
uid, gid, err := oci.ParseUIDGID(meta.User) userSpec, err := getUserSpec(meta.User, details.rootfsPath)
if err != nil { if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} }
proc.User = specs.User{ proc.User = userSpec
UID: uid,
GID: gid,
AdditionalGids: []uint32{},
}
} }
proc.Terminal = meta.Tty proc.Terminal = meta.Tty
proc.Args = meta.Args // setArgs will set the proper command line arguments for this process.
// On Windows, this will set the CommandLine field. On Linux it will set the
// Args field.
setArgs(proc, meta.Args)
if meta.Cwd != "" { if meta.Cwd != "" {
spec.Process.Cwd = meta.Cwd spec.Process.Cwd = meta.Cwd
} }
@ -370,8 +337,8 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
// handle signals (and resize) in separate go loop so it does not // handle signals (and resize) in separate go loop so it does not
// potentially block the container cancel/exit status loop below. // potentially block the container cancel/exit status loop below.
eventCtx, eventCancel := context.WithCancel(ctx) eventCtx, eventCancel := context.WithCancelCause(ctx)
defer eventCancel() defer eventCancel(errors.WithStack(context.Canceled))
go func() { go func() {
for { for {
select { select {
@ -405,7 +372,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
} }
}() }()
var cancel func() var cancel func(error)
var killCtxDone <-chan struct{} var killCtxDone <-chan struct{}
ctxDone := ctx.Done() ctxDone := ctx.Done()
for { for {
@ -413,13 +380,14 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
case <-ctxDone: case <-ctxDone:
ctxDone = nil ctxDone = nil
var killCtx context.Context var killCtx context.Context
killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second) killCtx, cancel = context.WithCancelCause(context.Background())
killCtx, _ = context.WithTimeoutCause(killCtx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
killCtxDone = killCtx.Done() killCtxDone = killCtx.Done()
p.Kill(killCtx, syscall.SIGKILL) p.Kill(killCtx, syscall.SIGKILL)
io.Cancel() io.Cancel()
case status := <-statusCh: case status := <-statusCh:
if cancel != nil { if cancel != nil {
cancel() cancel(errors.WithStack(context.Canceled))
} }
trace.SpanFromContext(ctx).AddEvent( trace.SpanFromContext(ctx).AddEvent(
"Container exited", "Container exited",
@ -437,7 +405,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
exitErr.Err = errors.Wrap(ctx.Err(), exitErr.Error()) exitErr.Err = errors.Wrap(context.Cause(ctx), exitErr.Error())
default: default:
} }
return exitErr return exitErr
@ -445,7 +413,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
return nil return nil
case <-killCtxDone: case <-killCtxDone:
if cancel != nil { if cancel != nil {
cancel() cancel(errors.WithStack(context.Canceled))
} }
io.Cancel() io.Cancel()
return errors.Errorf("failed to kill process on cancel") return errors.Errorf("failed to kill process on cancel")

View file

@ -0,0 +1,183 @@
//go:build !windows
// +build !windows
package containerdexecutor
import (
"context"
"os"
"runtime"
"github.com/containerd/containerd"
"github.com/containerd/containerd/mount"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/network"
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// getUserSpec resolves the requested user string into an OCI user spec.
// With a mounted rootfs available the user database inside the rootfs is
// consulted (supporting usernames and supplementary groups); without one,
// only a numeric uid:gid form can be parsed.
func getUserSpec(user, rootfsPath string) (specs.User, error) {
	if rootfsPath == "" {
		// No rootfs to look the name up in; only uid:gid is supported.
		uid, gid, err := oci.ParseUIDGID(user)
		if err != nil {
			return specs.User{}, errors.WithStack(err)
		}
		return specs.User{UID: uid, GID: gid}, nil
	}
	uid, gid, sgids, err := oci.GetUser(rootfsPath, user)
	if err != nil {
		return specs.User{}, errors.WithStack(err)
	}
	return specs.User{
		UID:            uid,
		GID:            gid,
		AdditionalGids: sgids,
	}, nil
}
// prepareExecutionEnv sets up host-side state needed before generating the
// OCI spec: resolv.conf, the hosts file, and a locally mounted root
// filesystem. The root mounts and mounted rootfs path are recorded in
// details for later use (user lookup, CWD creation, task root FS). It
// returns the resolv.conf path, the hosts file path, and a cleanup
// function that undoes all setup; on success the caller must invoke the
// cleanup function, on error everything registered so far has already been
// released and the returned func is nil.
func (w *containerdExecutor) prepareExecutionEnv(ctx context.Context, rootMount executor.Mount, mounts []executor.Mount, meta executor.Meta, details *containerState, netMode pb.NetMode) (string, string, func(), error) {
	var releasers []func()
	// Run accumulated cleanups in reverse registration order (LIFO),
	// mirroring defer semantics so later setup is torn down first.
	releaseAll := func() {
		for i := len(releasers) - 1; i >= 0; i-- {
			releasers[i]()
		}
	}

	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig, netMode)
	if err != nil {
		releaseAll()
		return "", "", nil, err
	}

	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
	if err != nil {
		releaseAll()
		return "", "", nil, err
	}
	if clean != nil {
		releasers = append(releasers, clean)
	}
	mountable, err := rootMount.Src.Mount(ctx, false)
	if err != nil {
		releaseAll()
		return "", "", nil, err
	}

	rootMounts, release, err := mountable.Mount()
	if err != nil {
		releaseAll()
		return "", "", nil, err
	}
	details.rootMounts = rootMounts

	if release != nil {
		releasers = append(releasers, func() {
			if err := release(); err != nil {
				bklog.G(ctx).WithError(err).Error("failed to release root mount")
			}
		})
	}
	// Mount the root FS onto a plain host path so the rootfs contents are
	// directly reachable (needed by ensureCWD and user resolution).
	lm := snapshot.LocalMounterWithMounts(rootMounts)
	rootfsPath, err := lm.Mount()
	if err != nil {
		releaseAll()
		return "", "", nil, err
	}
	details.rootfsPath = rootfsPath
	releasers = append(releasers, func() {
		if err := lm.Unmount(); err != nil {
			bklog.G(ctx).WithError(err).Error("failed to unmount rootfs")
		}
	})
	// Registered last so stub cleanup runs first, while the rootfs is
	// still mounted.
	releasers = append(releasers, executor.MountStubsCleaner(ctx, details.rootfsPath, mounts, meta.RemoveMountStubsRecursive))

	return resolvConf, hostsFile, releaseAll, nil
}
// ensureCWD makes sure the working directory requested in meta exists
// inside the mounted rootfs, creating it (with any missing parents) owned
// by the resolved uid:gid when absent.
func (w *containerdExecutor) ensureCWD(ctx context.Context, details *containerState, meta executor.Meta) error {
	// Resolve Cwd against the rootfs; RootPath rejects paths that would
	// escape it (e.g. via symlinks or "..").
	newp, err := fs.RootPath(details.rootfsPath, meta.Cwd)
	if err != nil {
		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
	}

	uid, gid, _, err := oci.GetUser(details.rootfsPath, meta.User)
	if err != nil {
		return err
	}

	identity := idtools.Identity{
		UID: int(uid),
		GID: int(gid),
	}

	// Only create the directory if it does not already exist; an existing
	// directory's ownership is left untouched.
	if _, err := os.Stat(newp); err != nil {
		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
			return errors.Wrapf(err, "failed to create working directory %s", newp)
		}
	}
	return nil
}
// createOCISpec builds the OCI runtime spec for the container from the
// prepared environment (resolv.conf, hosts file, network namespace,
// mounts). It returns the spec together with a cleanup function that must
// be called by the caller on success; on error all cleanup has already run
// and the returned func is nil.
func (w *containerdExecutor) createOCISpec(ctx context.Context, id, resolvConf, hostsFile string, namespace network.Namespace, mounts []executor.Mount, meta executor.Meta, details *containerState) (*specs.Spec, func(), error) {
	var releasers []func()
	// Run accumulated cleanups in reverse registration order (LIFO).
	releaseAll := func() {
		for i := len(releasers) - 1; i >= 0; i-- {
			releasers[i]()
		}
	}

	uid, gid, sgids, err := oci.GetUser(details.rootfsPath, meta.User)
	if err != nil {
		releaseAll()
		return nil, nil, err
	}

	opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
	if meta.ReadonlyRootFS {
		opts = append(opts, containerdoci.WithRootFSReadonly())
	}

	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
	if err != nil {
		releaseAll()
		return nil, nil, err
	}
	releasers = append(releasers, cleanup)
	spec.Process.Terminal = meta.Tty
	if w.rootless {
		// Strip spec features unavailable without root (cgroups, certain
		// mounts) for rootless mode.
		if err := rootlessspecconv.ToRootless(spec); err != nil {
			releaseAll()
			return nil, nil, err
		}
	}
	return spec, releaseAll, nil
}
// getTaskOpts returns the containerd task options that expose the mounted
// rootfs path as the task's root filesystem, using the mount type native
// to the current OS (bind on Linux, nullfs on FreeBSD).
func (d *containerState) getTaskOpts() ([]containerd.NewTaskOpts, error) {
	m := mount.Mount{
		Source:  d.rootfsPath,
		Type:    "bind",
		Options: []string{"rbind"},
	}
	if runtime.GOOS == "freebsd" {
		m = mount.Mount{
			Source:  d.rootfsPath,
			Type:    "nullfs",
			Options: []string{},
		}
	}
	return []containerd.NewTaskOpts{containerd.WithRootFS([]mount.Mount{m})}, nil
}
// setArgs records the process command line on the spec. On Unix-like
// platforms the OCI Args field carries the full argv directly.
func setArgs(spec *specs.Process, args []string) {
	spec.Args = args
}

View file

@ -0,0 +1,106 @@
package containerdexecutor
import (
"context"
"os"
"strings"
"github.com/containerd/containerd"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network"
"github.com/moby/buildkit/util/windows"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// getUserSpec returns the user identity for the container process. On
// Windows only the username is meaningful; rootfsPath is unused here (SID
// resolution is done separately, see ensureCWD).
func getUserSpec(user, rootfsPath string) (specs.User, error) {
	return specs.User{
		Username: user,
	}, nil
}
// prepareExecutionEnv mounts the root filesystem and records the resulting
// mounts in details. On Windows there is no resolv.conf or hosts file to
// prepare, so the two string results are always empty. The returned cleanup
// function releases the root mount and is safe to call on every path.
func (w *containerdExecutor) prepareExecutionEnv(ctx context.Context, rootMount executor.Mount, mounts []executor.Mount, meta executor.Meta, details *containerState, netMode pb.NetMode) (string, string, func(), error) {
	var releasers []func() error
	// Release in reverse registration order (LIFO), matching the Unix
	// implementation's defer-like semantics. Cleanup is best-effort; release
	// errors are deliberately ignored.
	releaseAll := func() {
		for i := len(releasers) - 1; i >= 0; i-- {
			_ = releasers[i]()
		}
	}

	mountable, err := rootMount.Src.Mount(ctx, false)
	if err != nil {
		return "", "", releaseAll, err
	}
	rootMounts, release, err := mountable.Mount()
	if err != nil {
		return "", "", releaseAll, err
	}
	details.rootMounts = rootMounts
	// Mount may return a nil releaser; guard it so releaseAll never calls
	// a nil func (the Unix variant performs the same check).
	if release != nil {
		releasers = append(releasers, release)
	}

	return "", "", releaseAll, nil
}
// ensureCWD makes sure the working directory requested in meta exists
// inside the container's root filesystem, creating it (with any missing
// parents) owned by the user's resolved SID when absent. The rootfs is
// temporarily mounted locally for the duration of the check.
func (w *containerdExecutor) ensureCWD(ctx context.Context, details *containerState, meta executor.Meta) (err error) {
	// TODO(gabriel-samfira): Use a snapshot?
	identity, err := windows.ResolveUsernameToSID(ctx, w, details.rootMounts, meta.User)
	if err != nil {
		return errors.Wrap(err, "getting user SID")
	}

	lm := snapshot.LocalMounterWithMounts(details.rootMounts)
	rootfsPath, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	// Resolve Cwd against the rootfs; RootPath rejects paths that would
	// escape it.
	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
	if err != nil {
		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
	}

	// Only create the directory if it does not already exist.
	if _, err := os.Stat(newp); err != nil {
		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
			return errors.Wrapf(err, "failed to create working directory %s", newp)
		}
	}
	return nil
}
// createOCISpec builds the OCI runtime spec for the container. On Windows
// the resolvConf, hostsFile, and details parameters are unused, and the
// spec is generated without cgroup, apparmor, or selinux settings. It
// returns the spec together with a cleanup function that must be called by
// the caller on success; on error the cleanup has already run.
func (w *containerdExecutor) createOCISpec(ctx context.Context, id, resolvConf, hostsFile string, namespace network.Namespace, mounts []executor.Mount, meta executor.Meta, details *containerState) (*specs.Spec, func(), error) {
	var releasers []func()
	releaseAll := func() {
		for _, release := range releasers {
			release()
		}
	}

	opts := []containerdoci.SpecOpts{
		containerdoci.WithUser(meta.User),
	}

	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, "", "", namespace, "", processMode, nil, "", false, w.traceSocket, opts...)
	if err != nil {
		releaseAll()
		return nil, nil, err
	}
	releasers = append(releasers, cleanup)
	return spec, releaseAll, nil
}
// getTaskOpts returns the containerd task options for the new task. On
// Windows the recorded root mounts are passed through directly rather than
// bind-mounting a host rootfs path as the Unix implementation does.
func (d *containerState) getTaskOpts() ([]containerd.NewTaskOpts, error) {
	return []containerd.NewTaskOpts{containerd.WithRootFS(d.rootMounts)}, nil
}
// setArgs records the process command line on the spec. On Windows the OCI
// spec uses a single CommandLine string, so the argv elements are joined
// with spaces.
func setArgs(spec *specs.Process, args []string) {
	spec.CommandLine = strings.Join(args, " ")
}

View file

@ -24,30 +24,6 @@ func withRemovedMount(destination string) oci.SpecOpts {
} }
} }
func withROBind(src, dest string) oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Mounts = append(s.Mounts, specs.Mount{
Destination: dest,
Type: "bind",
Source: src,
Options: []string{"nosuid", "noexec", "nodev", "rbind", "ro"},
})
return nil
}
}
func withCGroup() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "/sys/fs/cgroup",
Type: "cgroup",
Source: "cgroup",
Options: []string{"ro", "nosuid", "noexec", "nodev"},
})
return nil
}
}
func hasPrefix(p, prefixDir string) bool { func hasPrefix(p, prefixDir string) bool {
prefixDir = filepath.Clean(prefixDir) prefixDir = filepath.Clean(prefixDir)
if filepath.Base(prefixDir) == string(filepath.Separator) { if filepath.Base(prefixDir) == string(filepath.Separator) {
@ -57,49 +33,6 @@ func hasPrefix(p, prefixDir string) bool {
return p == prefixDir || strings.HasPrefix(p, prefixDir+string(filepath.Separator)) return p == prefixDir || strings.HasPrefix(p, prefixDir+string(filepath.Separator))
} }
// removeMountsWithPrefix returns only the mounts whose destination does not
// live under prefixDir.
func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount {
	var kept []specs.Mount
	for _, mnt := range mounts {
		if hasPrefix(mnt.Destination, prefixDir) {
			continue
		}
		kept = append(kept, mnt)
	}
	return kept
}
// withBoundProc replaces any existing /proc mounts with a recursive bind of
// the host's /proc (prepended so it takes precedence), and drops /proc
// entries from the masked and read-only path lists so the bound procfs
// remains usable.
func withBoundProc() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Mounts = removeMountsWithPrefix(s.Mounts, "/proc")
		procMount := specs.Mount{
			Destination: "/proc",
			Type:        "bind",
			Source:      "/proc",
			// NOTE: "rbind"+"ro" does not make /proc read-only recursively.
			// So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode)
			Options: []string{"rbind"},
		}
		s.Mounts = append([]specs.Mount{procMount}, s.Mounts...)

		// Strip /proc entries from both path lists; the fresh slices start
		// nil, matching the original behavior for empty results.
		dropProc := func(paths []string) []string {
			var out []string
			for _, p := range paths {
				if !hasPrefix(p, "/proc") {
					out = append(out, p)
				}
			}
			return out
		}
		s.Linux.MaskedPaths = dropProc(s.Linux.MaskedPaths)
		s.Linux.ReadonlyPaths = dropProc(s.Linux.ReadonlyPaths)
		return nil
	}
}
func dedupMounts(mnts []specs.Mount) []specs.Mount { func dedupMounts(mnts []specs.Mount) []specs.Mount {
ret := make([]specs.Mount, 0, len(mnts)) ret := make([]specs.Mount, 0, len(mnts))
visited := make(map[string]int) visited := make(map[string]int)

View file

@ -7,6 +7,7 @@ import (
"github.com/docker/docker/libnetwork/resolvconf" "github.com/docker/docker/libnetwork/resolvconf"
"github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/flightcontrol"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -24,9 +25,13 @@ type DNSConfig struct {
SearchDomains []string SearchDomains []string
} }
func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) { func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig, netMode pb.NetMode) (string, error) {
p := filepath.Join(stateDir, "resolv.conf") p := filepath.Join(stateDir, "resolv.conf")
_, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) { if netMode == pb.NetMode_HOST {
p = filepath.Join(stateDir, "resolv-host.conf")
}
_, err := g.Do(ctx, p, func(ctx context.Context) (struct{}, error) {
generate := !notFirstRun generate := !notFirstRun
notFirstRun = true notFirstRun = true
@ -65,7 +70,6 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
return struct{}{}, err return struct{}{}, err
} }
var f *resolvconf.File
tmpPath := p + ".tmp" tmpPath := p + ".tmp"
if dns != nil { if dns != nil {
var ( var (
@ -83,19 +87,22 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
dnsOptions = resolvconf.GetOptions(dt) dnsOptions = resolvconf.GetOptions(dt)
} }
f, err = resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions) f, err := resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions)
if err != nil { if err != nil {
return struct{}{}, err return struct{}{}, err
} }
dt = f.Content dt = f.Content
} }
f, err = resolvconf.FilterResolvDNS(dt, true) if netMode != pb.NetMode_HOST || len(resolvconf.GetNameservers(dt, resolvconf.IP)) == 0 {
if err != nil { f, err := resolvconf.FilterResolvDNS(dt, true)
return struct{}{}, err if err != nil {
return struct{}{}, err
}
dt = f.Content
} }
if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil { if err := os.WriteFile(tmpPath, dt, 0644); err != nil {
return struct{}{}, err return struct{}{}, err
} }

View file

@ -4,6 +4,7 @@ import (
"context" "context"
"path" "path"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"sync" "sync"
@ -124,7 +125,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
} }
opts = append(opts, opts = append(opts,
oci.WithProcessArgs(meta.Args...), withProcessArgs(meta.Args...),
oci.WithEnv(meta.Env), oci.WithEnv(meta.Env),
oci.WithProcessCwd(meta.Cwd), oci.WithProcessCwd(meta.Cwd),
oci.WithNewPrivileges, oci.WithNewPrivileges,
@ -196,7 +197,9 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
} }
if tracingSocket != "" { if tracingSocket != "" {
s.Mounts = append(s.Mounts, getTracingSocketMount(tracingSocket)) if mount := getTracingSocketMount(tracingSocket); mount != nil {
s.Mounts = append(s.Mounts, *mount)
}
} }
s.Mounts = dedupMounts(s.Mounts) s.Mounts = dedupMounts(s.Mounts)
@ -254,17 +257,24 @@ func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error)
return mount.Mount{}, err return mount.Mount{}, err
} }
opts := []string{"rbind"} var mntType string
for _, opt := range m.Options { opts := []string{}
if opt == "ro" { if m.ReadOnly() {
opts = append(opts, opt) opts = append(opts, "ro")
} }
if runtime.GOOS != "windows" {
// Windows uses a mechanism similar to bind mounts, but will err out if we request
// a mount type it does not understand. Leaving the mount type empty on Windows will
// yield the same result.
mntType = "bind"
opts = append(opts, "rbind")
} }
s.m[h] = mountRef{ s.m[h] = mountRef{
mount: mount.Mount{ mount: mount.Mount{
Source: mp, Source: mp,
Type: "bind", Type: mntType,
Options: opts, Options: opts,
}, },
unmount: lm.Unmount, unmount: lm.Unmount,
@ -298,15 +308,3 @@ func (s *submounts) cleanup() {
} }
wg.Wait() wg.Wait()
} }
func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
var ids []specs.LinuxIDMapping
for _, item := range s {
ids = append(ids, specs.LinuxIDMapping{
HostID: uint32(item.HostID),
ContainerID: uint32(item.ContainerID),
Size: uint32(item.Size),
})
}
return ids
}

View file

@ -2,9 +2,66 @@ package oci
import ( import (
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/solver/pb"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
) )
// withProcessArgs sets the container process arguments on the spec.
// On this platform the args are passed through unchanged via the generic
// containerd helper (contrast the Windows variant, which joins them into a
// single command line).
func withProcessArgs(args ...string) oci.SpecOpts {
	return oci.WithProcessArgs(args...)
}
// generateMountOpts returns platform-specific mount options. This FreeBSD
// variant adds no extra mounts (resolvConf and hostsFile are unused here).
func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
	return nil, nil
}
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts.
// FreeBSD only supports the default (non-insecure) mode; requesting
// INSECURE is an error, and no extra options are needed otherwise.
func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) ([]oci.SpecOpts, error) {
	if mode != pb.SecurityMode_INSECURE {
		return nil, nil
	}
	return nil, errors.New("no support for running in insecure mode on FreeBSD")
}
// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts.
// FreeBSD only supports the sandboxed process mode; NoProcessSandbox is an error.
func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
	if mode != NoProcessSandbox {
		return nil, nil
	}
	return nil, errors.New("no support for NoProcessSandbox on FreeBSD")
}
// generateIDmapOpts rejects identity mapping, which is unsupported on FreeBSD;
// a nil mapping yields no options.
func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
	if idmap != nil {
		return nil, errors.New("no support for IdentityMapping on FreeBSD")
	}
	return nil, nil
}
// generateRlimitOpts rejects ulimit settings, which are unsupported on
// FreeBSD; an empty list yields no options.
func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
	if len(ulimits) > 0 {
		return nil, errors.New("no support for POSIXRlimit on FreeBSD")
	}
	return nil, nil
}
// tracing is not implemented on FreeBSD
// getTracingSocketMount always returns nil: the caller skips adding a
// tracing-socket mount when it gets nil (see GenerateSpec).
func getTracingSocketMount(socket string) *specs.Mount {
	return nil
}
// getTracingSocket returns the in-container tracing endpoint URL.
// Tracing is not implemented on FreeBSD, so the endpoint is always empty.
func getTracingSocket() string {
	var endpoint string // unsupported: no endpoint
	return endpoint
}
// cgroupV2NamespaceSupported reports whether cgroup v2 namespaces can be
// used; this is never the case on FreeBSD.
func cgroupV2NamespaceSupported() bool {
	const supported = false
	return supported
}
func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) { func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) {
src, err := fs.RootPath(m.Source, subPath) src, err := fs.RootPath(m.Source, subPath)
if err != nil { if err != nil {

View file

@ -1,19 +1,254 @@
//go:build linux
// +build linux
package oci package oci
import ( import (
"context"
"fmt"
"os" "os"
"strconv" "strconv"
"strings"
"sync"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/oci"
cdseccomp "github.com/containerd/containerd/pkg/seccomp"
"github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/profiles/seccomp"
"github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/entitlements/security"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
var (
cgroupNSOnce sync.Once
supportsCgroupNS bool
)
const (
tracingSocketPath = "/dev/otel-grpc.sock"
)
// withProcessArgs sets the container process argv on the spec.
// On Linux the args are used as-is via the generic containerd helper
// (contrast the Windows variant, which joins them into a command line).
func withProcessArgs(args ...string) oci.SpecOpts {
	return oci.WithProcessArgs(args...)
}
// generateMountOpts returns the Linux-specific mount options: /run is
// removed, the given resolv.conf and hosts files are bound read-only, and a
// read-only cgroup mount is added.
func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
	mountOpts := []oci.SpecOpts{
		// https://github.com/moby/buildkit/issues/429
		withRemovedMount("/run"),
	}
	mountOpts = append(mountOpts,
		withROBind(resolvConf, "/etc/resolv.conf"),
		withROBind(hostsFile, "/etc/hosts"),
		withCGroup(),
	)
	return mountOpts, nil
}
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
//
// Requesting SELinux on a host where it is not enabled is an error.
// INSECURE mode relaxes the sandbox (insecure spec, writable cgroupfs and
// sysfs) and, when SELinux is requested, disables labeling entirely.
// SANDBOX mode applies the default seccomp profile (only when seccomp is
// enabled on the host), an optional AppArmor profile, and fresh SELinux
// labels. Any other mode yields no options.
func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) {
	if selinuxB && !selinux.GetEnabled() {
		return nil, errors.New("selinux is not available")
	}
	switch mode {
	case pb.SecurityMode_INSECURE:
		return []oci.SpecOpts{
			security.WithInsecureSpec(),
			oci.WithWriteableCgroupfs,
			oci.WithWriteableSysfs,
			func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
				var err error
				if selinuxB {
					// "disable" turns off SELinux labeling for this container.
					s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
				}
				return err
			},
		}, nil
	case pb.SecurityMode_SANDBOX:
		if cdseccomp.IsEnabled() {
			opts = append(opts, withDefaultProfile())
		}
		if apparmorProfile != "" {
			opts = append(opts, oci.WithApparmorProfile(apparmorProfile))
		}
		opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
			var err error
			if selinuxB {
				// nil options: allocate default process/mount labels.
				s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
			}
			return err
		})
		return opts, nil
	}
	return nil, nil
}
// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts.
// NoProcessSandbox shares the host PID namespace and binds the host's /proc.
func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
	if mode != NoProcessSandbox {
		return nil, nil
	}
	// TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly
	return []oci.SpecOpts{
		oci.WithHostNamespace(specs.PIDNamespace),
		withBoundProc(),
	}, nil
}
// generateIDmapOpts converts the daemon's identity mapping into a user
// namespace spec option; a nil mapping yields no options.
func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
	if idmap == nil {
		return nil, nil
	}
	uids := specMapping(idmap.UIDMaps)
	gids := specMapping(idmap.GIDMaps)
	return []oci.SpecOpts{oci.WithUserNamespace(uids, gids)}, nil
}
// specMapping converts idtools ID mappings into the runtime-spec form.
// An empty input yields a nil slice.
func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
	var converted []specs.LinuxIDMapping
	for _, mapping := range s {
		entry := specs.LinuxIDMapping{
			HostID:      uint32(mapping.HostID),
			ContainerID: uint32(mapping.ContainerID),
			Size:        uint32(mapping.Size),
		}
		converted = append(converted, entry)
	}
	return converted
}
// generateRlimitOpts converts ulimit settings into a SpecOpts that installs
// the corresponding POSIX rlimits on the container process. Nil entries are
// skipped; an empty list yields no options.
func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
	if len(ulimits) == 0 {
		return nil, nil
	}
	var rlimits []specs.POSIXRlimit
	for _, ulimit := range ulimits {
		if ulimit == nil {
			continue
		}
		rlimits = append(rlimits, specs.POSIXRlimit{
			// e.g. "nofile" -> "RLIMIT_NOFILE"
			Type: "RLIMIT_" + strings.ToUpper(ulimit.Name),
			Hard: uint64(ulimit.Hard),
			Soft: uint64(ulimit.Soft),
		})
	}
	setRlimits := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Process.Rlimits = rlimits
		return nil
	}
	return []oci.SpecOpts{setRlimits}, nil
}
// withDefaultProfile sets the default seccomp profile to the spec.
// Note: must follow the setting of process capabilities
// (GetDefaultProfile derives the profile from the spec it is given, so the
// spec's capabilities must already be final — see the ordering note above).
func withDefaultProfile() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		var err error
		s.Linux.Seccomp, err = seccomp.GetDefaultProfile(s)
		return err
	}
}
// withROBind bind-mounts the host path src into the container at dest,
// read-only.
func withROBind(src, dest string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		bindMount := specs.Mount{
			Destination: dest,
			Type:        "bind",
			Source:      src,
			Options:     []string{"nosuid", "noexec", "nodev", "rbind", "ro"},
		}
		s.Mounts = append(s.Mounts, bindMount)
		return nil
	}
}
// withCGroup adds a read-only cgroup filesystem mount at /sys/fs/cgroup.
func withCGroup() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		mnt := specs.Mount{
			Destination: "/sys/fs/cgroup",
			Type:        "cgroup",
			Source:      "cgroup",
			Options:     []string{"ro", "nosuid", "noexec", "nodev"},
		}
		s.Mounts = append(s.Mounts, mnt)
		return nil
	}
}
// withBoundProc replaces any existing /proc mounts with a recursive bind of
// the host's /proc (prepended so it takes precedence over later mounts) and
// removes /proc entries from the masked and read-only path lists so the
// bound procfs stays usable. Used when running without a PID sandbox.
func withBoundProc() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		// Drop any spec-provided /proc mounts before binding the host's.
		s.Mounts = removeMountsWithPrefix(s.Mounts, "/proc")
		procMount := specs.Mount{
			Destination: "/proc",
			Type:        "bind",
			Source:      "/proc",
			// NOTE: "rbind"+"ro" does not make /proc read-only recursively.
			// So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode)
			Options: []string{"rbind"},
		}
		s.Mounts = append([]specs.Mount{procMount}, s.Mounts...)

		// Filter /proc entries out of MaskedPaths.
		var maskedPaths []string
		for _, s := range s.Linux.MaskedPaths {
			if !hasPrefix(s, "/proc") {
				maskedPaths = append(maskedPaths, s)
			}
		}
		s.Linux.MaskedPaths = maskedPaths

		// Filter /proc entries out of ReadonlyPaths.
		var readonlyPaths []string
		for _, s := range s.Linux.ReadonlyPaths {
			if !hasPrefix(s, "/proc") {
				readonlyPaths = append(readonlyPaths, s)
			}
		}
		s.Linux.ReadonlyPaths = readonlyPaths
		return nil
	}
}
// removeMountsWithPrefix filters out every mount whose destination lies
// under prefixDir, returning the remainder.
func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount {
	var remaining []specs.Mount
	for _, mountEntry := range mounts {
		if hasPrefix(mountEntry.Destination, prefixDir) {
			continue
		}
		remaining = append(remaining, mountEntry)
	}
	return remaining
}
// getTracingSocketMount returns the read-only bind mount that exposes the
// host tracing socket at the fixed in-container path.
func getTracingSocketMount(socket string) *specs.Mount {
	mnt := specs.Mount{
		Destination: tracingSocketPath,
		Type:        "bind",
		Source:      socket,
		Options:     []string{"ro", "rbind"},
	}
	return &mnt
}
func getTracingSocket() string {
return fmt.Sprintf("unix://%s", tracingSocketPath)
}
// cgroupV2NamespaceSupported reports whether cgroup namespaces can be used
// with cgroups v2. The probe runs once and the result is cached.
func cgroupV2NamespaceSupported() bool {
	// Check if cgroups v2 namespaces are supported. Trying to do cgroup
	// namespaces with cgroups v1 results in EINVAL when we encounter a
	// non-standard hierarchy.
	// See https://github.com/moby/buildkit/issues/4108
	cgroupNSOnce.Do(func() {
		// No cgroup namespace file: kernel lacks cgroup namespaces.
		if _, err := os.Stat("/proc/self/ns/cgroup"); os.IsNotExist(err) {
			return
		}
		// No subtree_control file: /sys/fs/cgroup is not a cgroup v2
		// unified mount (presumably a v1 hierarchy) — TODO confirm.
		if _, err := os.Stat("/sys/fs/cgroup/cgroup.subtree_control"); os.IsNotExist(err) {
			return
		}
		supportsCgroupNS = true
	})
	return supportsCgroupNS
}
func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) { func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) {
var retries = 10 var retries = 10
root := m.Source root := m.Source

View file

@ -1,165 +0,0 @@
//go:build !windows
// +build !windows
package oci
import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
cdseccomp "github.com/containerd/containerd/pkg/seccomp"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/profiles/seccomp"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/entitlements/security"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
cgroupNSOnce sync.Once
supportsCgroupNS bool
)
const (
tracingSocketPath = "/dev/otel-grpc.sock"
)
// generateMountOpts returns the Unix mount options: /run removed, the given
// resolv.conf and hosts files bound read-only, and a read-only cgroup mount.
func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
	return []oci.SpecOpts{
		// https://github.com/moby/buildkit/issues/429
		withRemovedMount("/run"),
		withROBind(resolvConf, "/etc/resolv.conf"),
		withROBind(hostsFile, "/etc/hosts"),
		withCGroup(),
	}, nil
}
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
//
// Requesting SELinux on a host where it is not enabled is an error. INSECURE
// relaxes the sandbox and disables SELinux labeling; SANDBOX applies seccomp
// (when host-enabled), optional AppArmor, and fresh SELinux labels.
func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) {
	if selinuxB && !selinux.GetEnabled() {
		return nil, errors.New("selinux is not available")
	}
	switch mode {
	case pb.SecurityMode_INSECURE:
		return []oci.SpecOpts{
			security.WithInsecureSpec(),
			oci.WithWriteableCgroupfs,
			oci.WithWriteableSysfs,
			func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
				var err error
				if selinuxB {
					// "disable" turns off SELinux labeling for this container.
					s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
				}
				return err
			},
		}, nil
	case pb.SecurityMode_SANDBOX:
		if cdseccomp.IsEnabled() {
			opts = append(opts, withDefaultProfile())
		}
		if apparmorProfile != "" {
			opts = append(opts, oci.WithApparmorProfile(apparmorProfile))
		}
		opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
			var err error
			if selinuxB {
				// nil options: allocate default process/mount labels.
				s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
			}
			return err
		})
		return opts, nil
	}
	return nil, nil
}
// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts
// NoProcessSandbox shares the host PID namespace and binds the host's /proc.
func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
	if mode == NoProcessSandbox {
		return []oci.SpecOpts{
			oci.WithHostNamespace(specs.PIDNamespace),
			withBoundProc(),
		}, nil
		// TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly
	}
	return nil, nil
}
// generateIDmapOpts converts the daemon's identity mapping into a user
// namespace spec option; a nil mapping yields no options.
func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
	if idmap == nil {
		return nil, nil
	}
	return []oci.SpecOpts{
		oci.WithUserNamespace(specMapping(idmap.UIDMaps), specMapping(idmap.GIDMaps)),
	}, nil
}
// generateRlimitOpts converts ulimit settings into a SpecOpts that installs
// the corresponding POSIX rlimits on the container process. Nil entries are
// skipped; an empty list yields no options.
func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
	if len(ulimits) == 0 {
		return nil, nil
	}
	var rlimits []specs.POSIXRlimit
	for _, u := range ulimits {
		if u == nil {
			continue
		}
		rlimits = append(rlimits, specs.POSIXRlimit{
			// e.g. "nofile" -> "RLIMIT_NOFILE"
			Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(u.Name)),
			Hard: uint64(u.Hard),
			Soft: uint64(u.Soft),
		})
	}
	return []oci.SpecOpts{
		func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
			s.Process.Rlimits = rlimits
			return nil
		},
	}, nil
}
// withDefaultProfile sets the default seccomp profile to the spec.
// Note: must follow the setting of process capabilities
// (GetDefaultProfile derives the profile from the spec it receives).
func withDefaultProfile() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		var err error
		s.Linux.Seccomp, err = seccomp.GetDefaultProfile(s)
		return err
	}
}
// getTracingSocketMount returns (by value) the read-only bind mount that
// exposes the host tracing socket at the fixed in-container path.
func getTracingSocketMount(socket string) specs.Mount {
	return specs.Mount{
		Destination: tracingSocketPath,
		Type:        "bind",
		Source:      socket,
		Options:     []string{"ro", "rbind"},
	}
}
// getTracingSocket returns the in-container tracing endpoint as a unix:// URL.
func getTracingSocket() string {
	return fmt.Sprintf("unix://%s", tracingSocketPath)
}
// cgroupV2NamespaceSupported reports whether cgroup namespaces can be used
// with cgroups v2. The probe runs once and the result is cached.
func cgroupV2NamespaceSupported() bool {
	// Check if cgroups v2 namespaces are supported. Trying to do cgroup
	// namespaces with cgroups v1 results in EINVAL when we encounter a
	// non-standard hierarchy.
	// See https://github.com/moby/buildkit/issues/4108
	cgroupNSOnce.Do(func() {
		// No cgroup namespace file: kernel lacks cgroup namespaces.
		if _, err := os.Stat("/proc/self/ns/cgroup"); os.IsNotExist(err) {
			return
		}
		// No subtree_control file: /sys/fs/cgroup is not a cgroup v2
		// unified mount (presumably a v1 hierarchy) — TODO confirm.
		if _, err := os.Stat("/sys/fs/cgroup/cgroup.subtree_control"); os.IsNotExist(err) {
			return
		}
		supportsCgroupNS = true
	})
	return supportsCgroupNS
}

View file

@ -4,9 +4,13 @@
package oci package oci
import ( import (
"context"
"fmt" "fmt"
"os"
"path/filepath" "path/filepath"
"strings"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/oci" "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs"
@ -20,8 +24,37 @@ const (
tracingSocketPath = "//./pipe/otel-grpc" tracingSocketPath = "//./pipe/otel-grpc"
) )
func withProcessArgs(args ...string) oci.SpecOpts {
cmdLine := strings.Join(args, " ")
// This will set Args to nil and properly set the CommandLine option
// in the spec. On Windows we need to use CommandLine instead of Args.
return oci.WithProcessCommandLine(cmdLine)
}
func withGetUserInfoMount() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
execPath, err := os.Executable()
if err != nil {
return errors.Wrap(err, "getting executable path")
}
// The buildkit binary registers a re-exec function that is invoked when called with
// get-user-info as the name. We mount the binary as read-only inside the container. This
// spares us from having to ship a separate binary just for this purpose. The container does
// not share any state with the running buildkit daemon. In this scenario, we use the re-exec
// functionality to simulate a multi-call binary.
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "C:\\Windows\\System32\\get-user-info.exe",
Source: execPath,
Options: []string{"ro"},
})
return nil
}
}
func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
return nil, nil return []oci.SpecOpts{
withGetUserInfoMount(),
}, nil
} }
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts // generateSecurityOpts may affect mounts, so must be called after generateMountOpts
@ -54,8 +87,8 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
return nil, errors.New("no support for POSIXRlimit on Windows") return nil, errors.New("no support for POSIXRlimit on Windows")
} }
func getTracingSocketMount(socket string) specs.Mount { func getTracingSocketMount(socket string) *specs.Mount {
return specs.Mount{ return &specs.Mount{
Destination: filepath.FromSlash(tracingSocketPath), Destination: filepath.FromSlash(tracingSocketPath),
Source: socket, Source: socket,
Options: []string{"ro"}, Options: []string{"ro"},

View file

@ -9,7 +9,7 @@ import (
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
containerdoci "github.com/containerd/containerd/oci" containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs" "github.com/containerd/continuity/fs"
"github.com/opencontainers/runc/libcontainer/user" "github.com/moby/sys/user"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
) )

View file

@ -11,9 +11,9 @@ import (
"time" "time"
"github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/executor/resources/types"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/network"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
"github.com/sirupsen/logrus"
) )
const ( const (
@ -229,7 +229,7 @@ func NewMonitor() (*Monitor, error) {
return return
} }
if err := prepareCgroupControllers(); err != nil { if err := prepareCgroupControllers(); err != nil {
logrus.Warnf("failed to prepare cgroup controllers: %+v", err) bklog.L.Warnf("failed to prepare cgroup controllers: %+v", err)
} }
}) })
@ -280,7 +280,7 @@ func prepareCgroupControllers() error {
} }
if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil { if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil {
// ignore error // ignore error
logrus.Warnf("failed to enable cgroup controller %q: %+v", c, err) bklog.L.Warnf("failed to enable cgroup controller %q: %+v", c, err)
} }
} }
return nil return nil

View file

@ -146,8 +146,6 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
} }
func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
meta := process.Meta
startedOnce := sync.Once{} startedOnce := sync.Once{}
done := make(chan error, 1) done := make(chan error, 1)
w.mu.Lock() w.mu.Lock()
@ -166,6 +164,11 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
} }
}() }()
meta := process.Meta
if meta.NetMode == pb.NetMode_HOST {
bklog.G(ctx).Info("enabling HostNetworking")
}
provider, ok := w.networkProviders[meta.NetMode] provider, ok := w.networkProviders[meta.NetMode]
if !ok { if !ok {
return nil, errors.Errorf("unknown network mode %s", meta.NetMode) return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
@ -181,11 +184,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
} }
}() }()
if meta.NetMode == pb.NetMode_HOST { resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns, meta.NetMode)
bklog.G(ctx).Info("enabling HostNetworking")
}
resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -369,7 +368,7 @@ func exitError(ctx context.Context, err error) error {
) )
select { select {
case <-ctx.Done(): case <-ctx.Done():
exitErr.Err = errors.Wrapf(ctx.Err(), exitErr.Error()) exitErr.Err = errors.Wrapf(context.Cause(ctx), exitErr.Error())
return exitErr return exitErr
default: default:
return stack.Enable(exitErr) return stack.Enable(exitErr)
@ -402,7 +401,7 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return context.Cause(ctx)
case err, ok := <-done: case err, ok := <-done:
if !ok || err == nil { if !ok || err == nil {
return errors.Errorf("container %s has stopped", id) return errors.Errorf("container %s has stopped", id)
@ -532,8 +531,9 @@ func (k procKiller) Kill(ctx context.Context) (err error) {
// this timeout is generally a no-op, the Kill ctx should already have a // this timeout is generally a no-op, the Kill ctx should already have a
// shorter timeout but here as a fail-safe for future refactoring. // shorter timeout but here as a fail-safe for future refactoring.
ctx, timeout := context.WithTimeout(ctx, 10*time.Second) ctx, cancel := context.WithCancelCause(ctx)
defer timeout() ctx, _ = context.WithTimeoutCause(ctx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
if k.pidfile == "" { if k.pidfile == "" {
// for `runc run` process we use `runc kill` to terminate the process // for `runc run` process we use `runc kill` to terminate the process
@ -580,7 +580,7 @@ type procHandle struct {
monitorProcess *os.Process monitorProcess *os.Process
ready chan struct{} ready chan struct{}
ended chan struct{} ended chan struct{}
shutdown func() shutdown func(error)
// this this only used when the request context is canceled and we need // this this only used when the request context is canceled and we need
// to kill the in-container process. // to kill the in-container process.
killer procKiller killer procKiller
@ -594,7 +594,7 @@ type procHandle struct {
// The goal is to allow for runc to gracefully shutdown when the request context // The goal is to allow for runc to gracefully shutdown when the request context
// is cancelled. // is cancelled.
func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) { func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) {
runcCtx, cancel := context.WithCancel(context.Background()) runcCtx, cancel := context.WithCancelCause(context.Background())
p := &procHandle{ p := &procHandle{
ready: make(chan struct{}), ready: make(chan struct{}),
ended: make(chan struct{}), ended: make(chan struct{}),
@ -615,17 +615,17 @@ func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, con
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) killCtx, timeout := context.WithCancelCause(context.Background())
killCtx, _ = context.WithTimeoutCause(killCtx, 7*time.Second, errors.WithStack(context.DeadlineExceeded))
if err := p.killer.Kill(killCtx); err != nil { if err := p.killer.Kill(killCtx); err != nil {
select { select {
case <-killCtx.Done(): case <-killCtx.Done():
timeout() cancel(errors.WithStack(context.Cause(ctx)))
cancel()
return return
default: default:
} }
} }
timeout() timeout(errors.WithStack(context.Canceled))
select { select {
case <-time.After(50 * time.Millisecond): case <-time.After(50 * time.Millisecond):
case <-p.ended: case <-p.ended:
@ -653,7 +653,7 @@ func (p *procHandle) Release() {
// goroutines. // goroutines.
func (p *procHandle) Shutdown() { func (p *procHandle) Shutdown() {
if p.shutdown != nil { if p.shutdown != nil {
p.shutdown() p.shutdown(errors.WithStack(context.Canceled))
} }
} }
@ -663,7 +663,7 @@ func (p *procHandle) Shutdown() {
func (p *procHandle) WaitForReady(ctx context.Context) error { func (p *procHandle) WaitForReady(ctx context.Context) error {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return context.Cause(ctx)
case <-p.ready: case <-p.ready:
return nil return nil
} }
@ -673,10 +673,11 @@ func (p *procHandle) WaitForReady(ctx context.Context) error {
// We wait for up to 10s for the runc pid to be reported. If the started // We wait for up to 10s for the runc pid to be reported. If the started
// callback is non-nil it will be called after receiving the pid. // callback is non-nil it will be called after receiving the pid.
func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) ctx, cancel := context.WithCancelCause(ctx)
defer timeout() ctx, _ = context.WithTimeoutCause(ctx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
select { select {
case <-startedCtx.Done(): case <-ctx.Done():
return errors.New("go-runc started message never received") return errors.New("go-runc started message never received")
case runcPid, ok := <-startedCh: case runcPid, ok := <-startedCh:
if !ok { if !ok {

View file

@ -14,13 +14,13 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var unsupportedConsoleError = errors.New("tty for runc is only supported on linux") var errUnsupportedConsole = errors.New("tty for runc is only supported on linux")
func updateRuncFieldsForHostOS(runtime *runc.Runc) {} func updateRuncFieldsForHostOS(runtime *runc.Runc) {}
func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error { func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error {
if process.Meta.Tty { if process.Meta.Tty {
return unsupportedConsoleError return errUnsupportedConsole
} }
extraArgs := []string{} extraArgs := []string{}
if keep { if keep {
@ -40,7 +40,7 @@ func (w *runcExecutor) run(ctx context.Context, id, bundle string, process execu
func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error { func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error {
if process.Meta.Tty { if process.Meta.Tty {
return unsupportedConsoleError return errUnsupportedConsole
} }
killer, err := newExecProcKiller(w.runc, id) killer, err := newExecProcKiller(w.runc, id)

View file

@ -12,6 +12,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/containerd/containerd/pkg/epoch" "github.com/containerd/containerd/pkg/epoch"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
@ -63,9 +64,10 @@ func New(opt Opt) (exporter.Exporter, error) {
return im, nil return im, nil
} }
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
i := &imageExporterInstance{ i := &imageExporterInstance{
imageExporter: e, imageExporter: e,
id: id,
opts: ImageCommitOpts{ opts: ImageCommitOpts{
RefCfg: cacheconfig.RefConfig{ RefCfg: cacheconfig.RefConfig{
Compression: compression.New(compression.Default), Compression: compression.New(compression.Default),
@ -166,6 +168,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type imageExporterInstance struct { type imageExporterInstance struct {
*imageExporter *imageExporter
id int
opts ImageCommitOpts opts ImageCommitOpts
push bool push bool
pushByDigest bool pushByDigest bool
@ -178,6 +182,10 @@ type imageExporterInstance struct {
meta map[string][]byte meta map[string][]byte
} }
func (e *imageExporterInstance) ID() int {
return e.id
}
func (e *imageExporterInstance) Name() string { func (e *imageExporterInstance) Name() string {
return "exporting to image" return "exporting to image"
} }
@ -186,7 +194,8 @@ func (e *imageExporterInstance) Config() *exporter.Config {
return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression) return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
} }
func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) { func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
src = src.Clone()
if src.Metadata == nil { if src.Metadata == nil {
src.Metadata = make(map[string][]byte) src.Metadata = make(map[string][]byte)
} }
@ -211,7 +220,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
} }
}() }()
desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts) desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, inlineCache, &opts)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -273,6 +282,13 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
tagDone(nil) tagDone(nil)
if e.unpack { if e.unpack {
if opts.RewriteTimestamp {
// e.unpackImage cannot be used because src ref does not point to the rewritten image
///
// TODO: change e.unpackImage so that it takes Result[Remote] as parameter.
// https://github.com/moby/buildkit/pull/4057#discussion_r1324106088
return nil, nil, errors.New("exporter option \"rewrite-timestamp\" conflicts with \"unpack\"")
}
if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil {
return nil, nil, err return nil, nil, err
} }
@ -284,6 +300,9 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
refs = append(refs, src.Ref) refs = append(refs, src.Ref)
} }
for _, ref := range src.Refs { for _, ref := range src.Refs {
if ref == nil {
continue
}
refs = append(refs, ref) refs = append(refs, ref)
} }
eg, ctx := errgroup.WithContext(ctx) eg, ctx := errgroup.WithContext(ctx)
@ -309,7 +328,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
} }
} }
if e.push { if e.push {
err := e.pushImage(ctx, src, sessionID, targetName, desc.Digest) if opts.RewriteTimestamp {
annotations := map[digest.Digest]map[string]string{}
addAnnotations(annotations, *desc)
// e.pushImage cannot be used because src ref does not point to the rewritten image
//
// TODO: change e.pushImage so that it takes Result[Remote] as parameter.
// https://github.com/moby/buildkit/pull/4057#discussion_r1324106088
err = push.Push(ctx, e.opt.SessionManager, sessionID, e.opt.ImageWriter.opt.ContentStore, e.opt.ImageWriter.ContentStore(),
desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations)
} else {
err = e.pushImage(ctx, src, sessionID, targetName, desc.Digest)
}
if err != nil { if err != nil {
return nil, nil, errors.Wrapf(err, "failed to push %v", targetName) return nil, nil, errors.Wrapf(err, "failed to push %v", targetName)
} }
@ -339,6 +369,9 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou
refs = append(refs, src.Ref) refs = append(refs, src.Ref)
} }
for _, ref := range src.Refs { for _, ref := range src.Refs {
if ref == nil {
continue
}
refs = append(refs, ref) refs = append(refs, ref)
} }
@ -454,7 +487,7 @@ func getLayers(ctx context.Context, descs []ocispecs.Descriptor, manifest ocispe
for i, desc := range descs { for i, desc := range descs {
layers[i].Diff = ocispecs.Descriptor{ layers[i].Diff = ocispecs.Descriptor{
MediaType: ocispecs.MediaTypeImageLayer, MediaType: ocispecs.MediaTypeImageLayer,
Digest: digest.Digest(desc.Annotations["containerd.io/uncompressed"]), Digest: digest.Digest(desc.Annotations[labels.LabelUncompressed]),
} }
layers[i].Blob = manifest.Layers[i] layers[i].Blob = manifest.Layers[i]
} }

View file

@ -72,4 +72,8 @@ var (
// Value: int (0-9) for gzip and estargz // Value: int (0-9) for gzip and estargz
// Value: int (0-22) for zstd // Value: int (0-22) for zstd
OptKeyCompressionLevel ImageExporterOptKey = "compression-level" OptKeyCompressionLevel ImageExporterOptKey = "compression-level"
// Rewrite timestamps in layers to match SOURCE_DATE_EPOCH
// Value: bool <true|false>
OptKeyRewriteTimestamp ImageExporterOptKey = "rewrite-timestamp"
) )

View file

@ -60,10 +60,13 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
return ps, nil return ps, nil
} }
func ParseKey(meta map[string][]byte, key string, p Platform) []byte { func ParseKey(meta map[string][]byte, key string, p *Platform) []byte {
if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok { if p != nil {
return v if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok {
} else if v, ok := meta[key]; ok { return v
}
}
if v, ok := meta[key]; ok {
return v return v
} }
return nil return nil

View file

@ -1,6 +1,9 @@
package exptypes package exptypes
import ( import (
"context"
"github.com/moby/buildkit/solver/result"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -10,7 +13,7 @@ const (
ExporterImageConfigKey = "containerimage.config" ExporterImageConfigKey = "containerimage.config"
ExporterImageConfigDigestKey = "containerimage.config.digest" ExporterImageConfigDigestKey = "containerimage.config.digest"
ExporterImageDescriptorKey = "containerimage.descriptor" ExporterImageDescriptorKey = "containerimage.descriptor"
ExporterInlineCache = "containerimage.inlinecache" ExporterImageBaseConfigKey = "containerimage.base.config"
ExporterPlatformsKey = "refs.platforms" ExporterPlatformsKey = "refs.platforms"
) )
@ -18,7 +21,7 @@ const (
// a platform to become platform specific // a platform to become platform specific
var KnownRefMetadataKeys = []string{ var KnownRefMetadataKeys = []string{
ExporterImageConfigKey, ExporterImageConfigKey,
ExporterInlineCache, ExporterImageBaseConfigKey,
} }
type Platforms struct { type Platforms struct {
@ -29,3 +32,8 @@ type Platform struct {
ID string ID string
Platform ocispecs.Platform Platform ocispecs.Platform
} }
type InlineCacheEntry struct {
Data []byte
}
type InlineCache func(ctx context.Context) (*result.Result[*InlineCacheEntry], error)

View file

@ -1,52 +0,0 @@
package image
import (
"time"
"github.com/docker/docker/api/types/strslice"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// ImageConfig is a docker compatible config for an image
type ImageConfig struct {
ocispecs.ImageConfig
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
// NetworkDisabled bool `json:",omitempty"` // Is network disabled
// MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispecs.Image
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}

View file

@ -21,6 +21,7 @@ type ImageCommitOpts struct {
Epoch *time.Time Epoch *time.Time
ForceInlineAttestations bool // force inline attestations to be attached ForceInlineAttestations bool // force inline attestations to be attached
RewriteTimestamp bool // rewrite timestamps in layers to match the epoch
} }
func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[string]string, error) { func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[string]string, error) {
@ -52,6 +53,8 @@ func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[
err = parseBool(&c.ForceInlineAttestations, k, v) err = parseBool(&c.ForceInlineAttestations, k, v)
case exptypes.OptKeyPreferNondistLayers: case exptypes.OptKeyPreferNondistLayers:
err = parseBool(&c.RefCfg.PreferNonDistributable, k, v) err = parseBool(&c.RefCfg.PreferNonDistributable, k, v)
case exptypes.OptKeyRewriteTimestamp:
err = parseBool(&c.RewriteTimestamp, k, v)
default: default:
rest[k] = v rest[k] = v
} }
@ -65,10 +68,6 @@ func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[
c.EnableOCITypes(ctx, c.RefCfg.Compression.Type.String()) c.EnableOCITypes(ctx, c.RefCfg.Compression.Type.String())
} }
if c.RefCfg.Compression.Type.NeedsForceCompression() {
c.EnableForceCompression(ctx, c.RefCfg.Compression.Type.String())
}
c.Annotations = c.Annotations.Merge(as) c.Annotations = c.Annotations.Merge(as)
return rest, nil return rest, nil

View file

@ -5,6 +5,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"reflect"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -12,6 +13,7 @@ import (
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff" "github.com/containerd/containerd/diff"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
intoto "github.com/in-toto/in-toto-golang/in_toto" intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache"
@ -27,10 +29,13 @@ import (
attestationTypes "github.com/moby/buildkit/util/attestation" attestationTypes "github.com/moby/buildkit/util/attestation"
"github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/converter"
"github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/purl" "github.com/moby/buildkit/util/purl"
"github.com/moby/buildkit/util/system" "github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/util/tracing" "github.com/moby/buildkit/util/tracing"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go" specs "github.com/opencontainers/image-spec/specs-go"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -56,7 +61,7 @@ type ImageWriter struct {
opt WriterOpt opt WriterOpt
} }
func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, sessionID string, opts *ImageCommitOpts) (*ocispecs.Descriptor, error) { func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, sessionID string, inlineCache exptypes.InlineCache, opts *ImageCommitOpts) (*ocispecs.Descriptor, error) {
if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; len(inp.Refs) > 0 && !ok { if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; len(inp.Refs) > 0 && !ok {
return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping")
} }
@ -111,29 +116,59 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
} }
var ref cache.ImmutableRef var ref cache.ImmutableRef
var p exptypes.Platform var p *exptypes.Platform
if len(ps.Platforms) > 0 { if len(ps.Platforms) > 0 {
p = ps.Platforms[0] p = &ps.Platforms[0]
if r, ok := inp.FindRef(p.ID); ok { if r, ok := inp.FindRef(p.ID); ok {
ref = r ref = r
} }
} else { } else {
ref = inp.Ref ref = inp.Ref
} }
config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p)
baseImgConfig := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageBaseConfigKey, p)
var baseImg *dockerspec.DockerOCIImage
if len(baseImgConfig) > 0 {
var baseImgX dockerspec.DockerOCIImage
if err := json.Unmarshal(baseImgConfig, &baseImgX); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal base image config")
}
baseImg = &baseImgX
}
remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref) remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
remote := &remotes[0]
if opts.RewriteTimestamp {
remote, err = ic.rewriteRemoteWithEpoch(ctx, opts, remote, baseImg)
if err != nil {
return nil, err
}
}
annotations := opts.Annotations.Platform(nil) annotations := opts.Annotations.Platform(nil)
if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 { if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 {
return nil, errors.Errorf("index annotations not supported for single platform export") return nil, errors.Errorf("index annotations not supported for single platform export")
} }
config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) var inlineCacheEntry *exptypes.InlineCacheEntry
inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) if inlineCache != nil {
mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, opts.Epoch, session.NewGroup(sessionID)) inlineCacheResult, err := inlineCache(ctx)
if err != nil {
return nil, err
}
if inlineCacheResult != nil {
if p != nil {
inlineCacheEntry, _ = inlineCacheResult.FindRef(p.ID)
} else {
inlineCacheEntry = inlineCacheResult.Ref
}
}
}
mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, remote, annotations, inlineCacheEntry, opts.Epoch, session.NewGroup(sessionID), baseImg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -168,6 +203,14 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
return nil, err return nil, err
} }
var inlineCacheResult *result.Result[*exptypes.InlineCacheEntry]
if inlineCache != nil {
inlineCacheResult, err = inlineCache(ctx)
if err != nil {
return nil, err
}
}
idx := ocispecs.Index{ idx := ocispecs.Index{
MediaType: ocispecs.MediaTypeImageIndex, MediaType: ocispecs.MediaTypeImageIndex,
Annotations: opts.Annotations.Platform(nil).Index, Annotations: opts.Annotations.Platform(nil).Index,
@ -189,8 +232,16 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
if !ok { if !ok {
return nil, errors.Errorf("failed to find ref for ID %s", p.ID) return nil, errors.Errorf("failed to find ref for ID %s", p.ID)
} }
config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, &p)
inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) baseImgConfig := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageBaseConfigKey, &p)
var baseImg *dockerspec.DockerOCIImage
if len(baseImgConfig) > 0 {
var baseImgX dockerspec.DockerOCIImage
if err := json.Unmarshal(baseImgConfig, &baseImgX); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal base image config")
}
baseImg = &baseImgX
}
remote := &remotes[remotesMap[p.ID]] remote := &remotes[remotesMap[p.ID]]
if remote == nil { if remote == nil {
@ -198,8 +249,19 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
Provider: ic.opt.ContentStore, Provider: ic.opt.ContentStore,
} }
} }
if opts.RewriteTimestamp {
remote, err = ic.rewriteRemoteWithEpoch(ctx, opts, remote, baseImg)
if err != nil {
return nil, err
}
}
desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, opts.Epoch, session.NewGroup(sessionID)) var inlineCacheEntry *exptypes.InlineCacheEntry
if inlineCacheResult != nil {
inlineCacheEntry, _ = inlineCacheResult.FindRef(p.ID)
}
desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCacheEntry, opts.Epoch, session.NewGroup(sessionID), baseImg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -323,7 +385,74 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC
return out, err return out, err
} }
func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { // rewriteImageLayerWithEpoch rewrites the file timestamps in the layer blob to match the epoch, and returns a new descriptor that points to
// the new blob.
//
// If no conversion is needed, this returns nil without error.
func rewriteImageLayerWithEpoch(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config, epoch *time.Time, immDiffID digest.Digest) (*ocispecs.Descriptor, error) {
var immDiffIDs map[digest.Digest]struct{}
if immDiffID != "" {
immDiffIDs = map[digest.Digest]struct{}{
immDiffID: {},
}
}
converterFn, err := converter.NewWithRewriteTimestamp(ctx, cs, desc, comp, epoch, immDiffIDs)
if err != nil {
return nil, err
}
if converterFn == nil {
return nil, nil
}
return converterFn(ctx, cs, desc)
}
func (ic *ImageWriter) rewriteRemoteWithEpoch(ctx context.Context, opts *ImageCommitOpts, remote *solver.Remote, baseImg *dockerspec.DockerOCIImage) (*solver.Remote, error) {
if opts.Epoch == nil {
bklog.G(ctx).Warn("rewrite-timestamp is specified, but no source-date-epoch was found")
return remote, nil
}
remoteDescriptors := remote.Descriptors
cs := contentutil.NewStoreWithProvider(ic.opt.ContentStore, remote.Provider)
eg, ctx := errgroup.WithContext(ctx)
rewriteDone := progress.OneOff(ctx,
fmt.Sprintf("rewriting layers with source-date-epoch %d (%s)", opts.Epoch.Unix(), opts.Epoch.String()))
var divergedFromBase bool
for i, desc := range remoteDescriptors {
i, desc := i, desc
info, err := cs.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
diffID := digest.Digest(info.Labels[labels.LabelUncompressed]) // can be empty
var immDiffID digest.Digest
if !divergedFromBase && baseImg != nil && i < len(baseImg.RootFS.DiffIDs) {
immDiffID = baseImg.RootFS.DiffIDs[i]
if immDiffID == diffID {
bklog.G(ctx).WithField("blob", desc).Debugf("Not rewriting to apply epoch (immutable diffID %q)", diffID)
continue
}
divergedFromBase = true
}
eg.Go(func() error {
if rewrittenDesc, err := rewriteImageLayerWithEpoch(ctx, cs, desc, opts.RefCfg.Compression, opts.Epoch, immDiffID); err != nil {
bklog.G(ctx).WithError(err).Warnf("failed to rewrite layer %d/%d to match source-date-epoch %d (%s)",
i+1, len(remoteDescriptors), opts.Epoch.Unix(), opts.Epoch.String())
} else if rewrittenDesc != nil {
remoteDescriptors[i] = *rewrittenDesc
}
return nil
})
}
if err := rewriteDone(eg.Wait()); err != nil {
return nil, err
}
return &solver.Remote{
Provider: cs,
Descriptors: remoteDescriptors,
}, nil
}
func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache *exptypes.InlineCacheEntry, epoch *time.Time, sg session.Group, baseImg *dockerspec.DockerOCIImage) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) {
if len(config) == 0 { if len(config) == 0 {
var err error var err error
config, err = defaultImageConfig() config, err = defaultImageConfig()
@ -342,7 +471,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima
return nil, nil, err return nil, nil, err
} }
config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, epoch) config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, epoch, baseImg)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -443,8 +572,8 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima
Digest: digest, Digest: digest,
Size: int64(len(data)), Size: int64(len(data)),
Annotations: map[string]string{ Annotations: map[string]string{
"containerd.io/uncompressed": digest.String(), labels.LabelUncompressed: digest.String(),
"in-toto.io/predicate-type": statement.PredicateType, "in-toto.io/predicate-type": statement.PredicateType,
}, },
} }
@ -535,6 +664,8 @@ func defaultImageConfig() ([]byte, error) {
img := ocispecs.Image{} img := ocispecs.Image{}
img.Architecture = pl.Architecture img.Architecture = pl.Architecture
img.OS = pl.OS img.OS = pl.OS
img.OSVersion = pl.OSVersion
img.OSFeatures = pl.OSFeatures
img.Variant = pl.Variant img.Variant = pl.Variant
img.RootFS.Type = "layers" img.RootFS.Type = "layers"
img.Config.WorkingDir = "/" img.Config.WorkingDir = "/"
@ -552,7 +683,7 @@ func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) {
img.Variant = intotoPlatform.Variant img.Variant = intotoPlatform.Variant
img.RootFS.Type = "layers" img.RootFS.Type = "layers"
for _, layer := range layers { for _, layer := range layers {
img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"])) img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations[labels.LabelUncompressed]))
} }
dt, err := json.Marshal(img) dt, err := json.Marshal(img)
return dt, errors.Wrap(err, "failed to create attestations image config") return dt, errors.Wrap(err, "failed to create attestations image config")
@ -568,7 +699,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
return config.History, nil return config.History, nil
} }
func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, epoch *time.Time) ([]byte, error) { func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache *exptypes.InlineCacheEntry, epoch *time.Time, baseImg *dockerspec.DockerOCIImage) ([]byte, error) {
var img ocispecs.Image var img ocispecs.Image
if err := json.Unmarshal(dt, &img); err != nil { if err := json.Unmarshal(dt, &img); err != nil {
return nil, errors.Wrap(err, "invalid image config for export") return nil, errors.Wrap(err, "invalid image config for export")
@ -593,7 +724,7 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
var rootFS ocispecs.RootFS var rootFS ocispecs.RootFS
rootFS.Type = "layers" rootFS.Type = "layers"
for _, desc := range descs { for _, desc := range descs {
rootFS.DiffIDs = append(rootFS.DiffIDs, digest.Digest(desc.Annotations["containerd.io/uncompressed"])) rootFS.DiffIDs = append(rootFS.DiffIDs, digest.Digest(desc.Annotations[labels.LabelUncompressed]))
} }
dt, err := json.Marshal(rootFS) dt, err := json.Marshal(rootFS)
if err != nil { if err != nil {
@ -602,7 +733,14 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
m["rootfs"] = dt m["rootfs"] = dt
if epoch != nil { if epoch != nil {
var divergedFromBase bool
for i, h := range history { for i, h := range history {
if !divergedFromBase && baseImg != nil && i < len(baseImg.History) && reflect.DeepEqual(h, baseImg.History[i]) {
// Retain the timestamp for the base image layers
// https://github.com/moby/buildkit/issues/4614
continue
}
divergedFromBase = true
if h.Created == nil || h.Created.After(*epoch) { if h.Created == nil || h.Created.After(*epoch) {
history[i].Created = epoch history[i].Created = epoch
} }
@ -645,7 +783,7 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
} }
if cache != nil { if cache != nil {
dt, err := json.Marshal(cache) dt, err := json.Marshal(cache.Data)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -751,7 +889,7 @@ func RemoveInternalLayerAnnotations(in map[string]string, oci bool) map[string]s
for k, v := range in { for k, v := range in {
// oci supports annotations but don't export internal annotations // oci supports annotations but don't export internal annotations
switch k { switch k {
case "containerd.io/uncompressed", "buildkit/createdat": case labels.LabelUncompressed, "buildkit/createdat":
continue continue
default: default:
if strings.HasPrefix(k, "containerd.io/distribution.source.") { if strings.HasPrefix(k, "containerd.io/distribution.source.") {

View file

@ -4,6 +4,7 @@ import (
"context" "context"
"github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/solver/result"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -14,13 +15,14 @@ type Source = result.Result[cache.ImmutableRef]
type Attestation = result.Attestation[cache.ImmutableRef] type Attestation = result.Attestation[cache.ImmutableRef]
type Exporter interface { type Exporter interface {
Resolve(context.Context, map[string]string) (ExporterInstance, error) Resolve(context.Context, int, map[string]string) (ExporterInstance, error)
} }
type ExporterInstance interface { type ExporterInstance interface {
ID() int
Name() string Name() string
Config() *Config Config() *Config
Export(ctx context.Context, src *Source, sessionID string) (map[string]string, DescriptorReference, error) Export(ctx context.Context, src *Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, DescriptorReference, error)
} }
type DescriptorReference interface { type DescriptorReference interface {

View file

@ -35,8 +35,9 @@ func New(opt Opt) (exporter.Exporter, error) {
return le, nil return le, nil
} }
func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { func (e *localExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
i := &localExporterInstance{ i := &localExporterInstance{
id: id,
localExporter: e, localExporter: e,
} }
_, err := i.opts.Load(opt) _, err := i.opts.Load(opt)
@ -49,9 +50,15 @@ func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type localExporterInstance struct { type localExporterInstance struct {
*localExporter *localExporter
id int
opts CreateFSOpts opts CreateFSOpts
} }
func (e *localExporterInstance) ID() int {
return e.id
}
func (e *localExporterInstance) Name() string { func (e *localExporterInstance) Name() string {
return "exporting to client directory" return "exporting to client directory"
} }
@ -60,9 +67,10 @@ func (e *localExporter) Config() *exporter.Config {
return exporter.NewConfig() return exporter.NewConfig()
} }
func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, _ exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) timeoutCtx, cancel := context.WithCancelCause(ctx)
defer cancel() timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
if e.opts.Epoch == nil { if e.opts.Epoch == nil {
if tm, ok, err := epoch.ParseSource(inp); err != nil { if tm, ok, err := epoch.ParseSource(inp); err != nil {
@ -108,8 +116,8 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source
if !e.opts.PlatformSplit { if !e.opts.PlatformSplit {
// check for duplicate paths // check for duplicate paths
err = outputFS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { err = outputFS.Walk(ctx, "", func(p string, entry os.DirEntry, err error) error {
if fi.IsDir() { if entry.IsDir() {
return nil return nil
} }
if err != nil && !errors.Is(err, os.ErrNotExist) { if err != nil && !errors.Is(err, os.ErrNotExist) {
@ -147,7 +155,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source
} }
progress := NewProgressHandler(ctx, lbl) progress := NewProgressHandler(ctx, lbl)
if err := filesync.CopyToCaller(ctx, outputFS, caller, progress); err != nil { if err := filesync.CopyToCaller(ctx, outputFS, e.id, caller, progress); err != nil {
return err return err
} }
return nil return nil

View file

@ -98,9 +98,14 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab
cleanup = lm.Unmount cleanup = lm.Unmount
} }
walkOpt := &fsutil.WalkOpt{} outputFS, err := fsutil.NewFS(src)
var idMapFunc func(p string, st *fstypes.Stat) fsutil.MapResult if err != nil {
return nil, nil, err
}
// wrap the output filesystem, applying appropriate filters
filterOpt := &fsutil.FilterOpt{}
var idMapFunc func(p string, st *fstypes.Stat) fsutil.MapResult
if idmap != nil { if idmap != nil {
idMapFunc = func(p string, st *fstypes.Stat) fsutil.MapResult { idMapFunc = func(p string, st *fstypes.Stat) fsutil.MapResult {
uid, gid, err := idmap.ToContainer(idtools.Identity{ uid, gid, err := idmap.ToContainer(idtools.Identity{
@ -115,19 +120,23 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab
return fsutil.MapResultKeep return fsutil.MapResultKeep
} }
} }
filterOpt.Map = func(p string, st *fstypes.Stat) fsutil.MapResult {
walkOpt.Map = func(p string, st *fstypes.Stat) fsutil.MapResult {
res := fsutil.MapResultKeep res := fsutil.MapResultKeep
if idMapFunc != nil { if idMapFunc != nil {
// apply host uid/gid
res = idMapFunc(p, st) res = idMapFunc(p, st)
} }
if opt.Epoch != nil { if opt.Epoch != nil {
// apply used-specified epoch time
st.ModTime = opt.Epoch.UnixNano() st.ModTime = opt.Epoch.UnixNano()
} }
return res return res
} }
outputFS, err = fsutil.NewFilterFS(outputFS, filterOpt)
if err != nil {
return nil, nil, err
}
outputFS := fsutil.NewFS(src, walkOpt)
attestations = attestation.Filter(attestations, nil, map[string][]byte{ attestations = attestation.Filter(attestations, nil, map[string][]byte{
result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)), result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)),
}) })
@ -137,11 +146,11 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab
} }
if len(attestations) > 0 { if len(attestations) > 0 {
subjects := []intoto.Subject{} subjects := []intoto.Subject{}
err = outputFS.Walk(ctx, func(path string, info fs.FileInfo, err error) error { err = outputFS.Walk(ctx, "", func(path string, entry fs.DirEntry, err error) error {
if err != nil { if err != nil {
return err return err
} }
if !info.Mode().IsRegular() { if !entry.Type().IsRegular() {
return nil return nil
} }
f, err := outputFS.Open(path) f, err := outputFS.Open(path)

View file

@ -11,7 +11,7 @@ import (
archiveexporter "github.com/containerd/containerd/images/archive" archiveexporter "github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache"
cacheconfig "github.com/moby/buildkit/cache/config" cacheconfig "github.com/moby/buildkit/cache/config"
"github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter"
@ -58,9 +58,10 @@ func New(opt Opt) (exporter.Exporter, error) {
return im, nil return im, nil
} }
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
i := &imageExporterInstance{ i := &imageExporterInstance{
imageExporter: e, imageExporter: e,
id: id,
tar: true, tar: true,
opts: containerimage.ImageCommitOpts{ opts: containerimage.ImageCommitOpts{
RefCfg: cacheconfig.RefConfig{ RefCfg: cacheconfig.RefConfig{
@ -99,11 +100,17 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type imageExporterInstance struct { type imageExporterInstance struct {
*imageExporter *imageExporter
id int
opts containerimage.ImageCommitOpts opts containerimage.ImageCommitOpts
tar bool tar bool
meta map[string][]byte meta map[string][]byte
} }
func (e *imageExporterInstance) ID() int {
return e.id
}
func (e *imageExporterInstance) Name() string { func (e *imageExporterInstance) Name() string {
return fmt.Sprintf("exporting to %s image format", e.opt.Variant) return fmt.Sprintf("exporting to %s image format", e.opt.Variant)
} }
@ -112,11 +119,12 @@ func (e *imageExporterInstance) Config() *exporter.Config {
return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression) return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
} }
func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) { func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
if e.opt.Variant == VariantDocker && len(src.Refs) > 0 { if e.opt.Variant == VariantDocker && len(src.Refs) > 0 {
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
} }
src = src.Clone()
if src.Metadata == nil { if src.Metadata == nil {
src.Metadata = make(map[string][]byte) src.Metadata = make(map[string][]byte)
} }
@ -141,7 +149,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
} }
}() }()
desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts) desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, inlineCache, &opts)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -198,8 +206,9 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
return nil, nil, errors.Errorf("invalid variant %q", e.opt.Variant) return nil, nil, errors.Errorf("invalid variant %q", e.opt.Variant)
} }
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) timeoutCtx, cancel := context.WithCancelCause(ctx)
defer cancel() timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
if err != nil { if err != nil {
@ -239,7 +248,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
} }
if e.tar { if e.tar {
w, err := filesync.CopyFileWriter(ctx, resp, caller) w, err := filesync.CopyFileWriter(ctx, resp, e.id, caller)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View file

@ -33,8 +33,11 @@ func New(opt Opt) (exporter.Exporter, error) {
return le, nil return le, nil
} }
func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { func (e *localExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
li := &localExporterInstance{localExporter: e} li := &localExporterInstance{
localExporter: e,
id: id,
}
_, err := li.opts.Load(opt) _, err := li.opts.Load(opt)
if err != nil { if err != nil {
return nil, err return nil, err
@ -46,9 +49,15 @@ func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type localExporterInstance struct { type localExporterInstance struct {
*localExporter *localExporter
id int
opts local.CreateFSOpts opts local.CreateFSOpts
} }
func (e *localExporterInstance) ID() int {
return e.id
}
func (e *localExporterInstance) Name() string { func (e *localExporterInstance) Name() string {
return "exporting to client tarball" return "exporting to client tarball"
} }
@ -57,7 +66,7 @@ func (e *localExporterInstance) Config() *exporter.Config {
return exporter.NewConfig() return exporter.NewConfig()
} }
func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, _ exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
var defers []func() error var defers []func() error
defer func() { defer func() {
@ -143,15 +152,16 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source
fs = d.FS fs = d.FS
} }
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) timeoutCtx, cancel := context.WithCancelCause(ctx)
defer cancel() timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
w, err := filesync.CopyFileWriter(ctx, nil, caller) w, err := filesync.CopyFileWriter(ctx, nil, e.id, caller)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View file

@ -9,6 +9,7 @@ import (
intoto "github.com/in-toto/in-toto-golang/in_toto" intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/sourceresolver"
gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/solver/result"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -33,12 +34,13 @@ const (
// attestation. // attestation.
type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error) type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error)
func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string, resolveOpt llb.ResolveImageConfigOpt) (Scanner, error) { func CreateSBOMScanner(ctx context.Context, resolver sourceresolver.MetaResolver, scanner string, resolveOpt sourceresolver.Opt) (Scanner, error) {
if scanner == "" { if scanner == "" {
return nil, nil return nil, nil
} }
scanner, _, dt, err := resolver.ResolveImageConfig(ctx, scanner, resolveOpt) imr := sourceresolver.NewImageMetaResolver(resolver)
scanner, _, dt, err := imr.ResolveImageConfig(ctx, scanner, resolveOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -7,7 +7,7 @@ import (
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/attestations/sbom" "github.com/moby/buildkit/frontend/attestations/sbom"
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
@ -20,6 +20,7 @@ import (
"github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/solver/result"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -93,14 +94,19 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) {
defer func() { defer func() {
var el *parser.ErrorLocation var el *parser.ErrorLocation
if errors.As(err, &el) { if errors.As(err, &el) {
err = wrapSource(err, src.SourceMap, el.Location) for _, l := range el.Locations {
err = wrapSource(err, src.SourceMap, l)
}
} }
}() }()
var scanner sbom.Scanner var scanner sbom.Scanner
if bc.SBOM != nil { if bc.SBOM != nil {
scanner, err = sbom.CreateSBOMScanner(ctx, c, bc.SBOM.Generator, llb.ResolveImageConfigOpt{ // TODO: scanner should pass policy
ResolveMode: opts["image-resolve-mode"], scanner, err = sbom.CreateSBOMScanner(ctx, c, bc.SBOM.Generator, sourceresolver.Opt{
ImageOpt: &sourceresolver.ResolveImageOpt{
ResolveMode: opts["image-resolve-mode"],
},
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -109,21 +115,21 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) {
scanTargets := sync.Map{} scanTargets := sync.Map{}
rb, err := bc.Build(ctx, func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) { rb, err := bc.Build(ctx, func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *dockerspec.DockerOCIImage, *dockerspec.DockerOCIImage, error) {
opt := convertOpt opt := convertOpt
opt.TargetPlatform = platform opt.TargetPlatform = platform
if idx != 0 { if idx != 0 {
opt.Warn = nil opt.Warn = nil
} }
st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx, src.Data, opt) st, img, baseImg, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx, src.Data, opt)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
def, err := st.Marshal(ctx) def, err := st.Marshal(ctx)
if err != nil { if err != nil {
return nil, nil, errors.Wrapf(err, "failed to marshal LLB definition") return nil, nil, nil, errors.Wrapf(err, "failed to marshal LLB definition")
} }
r, err := c.Solve(ctx, client.SolveRequest{ r, err := c.Solve(ctx, client.SolveRequest{
@ -131,12 +137,12 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) {
CacheImports: bc.CacheImports, CacheImports: bc.CacheImports,
}) })
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
ref, err := r.SingleRef() ref, err := r.SingleRef()
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
p := platforms.DefaultSpec() p := platforms.DefaultSpec()
@ -145,7 +151,7 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) {
} }
scanTargets.Store(platforms.Format(platforms.Normalize(p)), scanTarget) scanTargets.Store(platforms.Format(platforms.Normalize(p)), scanTarget)
return ref, img, nil return ref, img, baseImg, nil
}) })
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -16,11 +16,11 @@ import (
"time" "time"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver" "github.com/moby/buildkit/client/llb/imagemetaresolver"
"github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell" "github.com/moby/buildkit/frontend/dockerfile/shell"
@ -33,6 +33,7 @@ import (
"github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/gitutil"
"github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/suggest"
"github.com/moby/buildkit/util/system" "github.com/moby/buildkit/util/system"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
"github.com/moby/sys/signal" "github.com/moby/sys/signal"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -56,6 +57,7 @@ var nonEnvArgs = map[string]struct{}{
type ConvertOpt struct { type ConvertOpt struct {
dockerui.Config dockerui.Config
Client *dockerui.Client Client *dockerui.Client
MainContext *llb.State
SourceMap *llb.SourceMap SourceMap *llb.SourceMap
TargetPlatform *ocispecs.Platform TargetPlatform *ocispecs.Platform
MetaResolver llb.ImageMetaResolver MetaResolver llb.ImageMetaResolver
@ -70,13 +72,13 @@ type SBOMTargets struct {
IgnoreCache bool IgnoreCache bool
} }
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *image.Image, *SBOMTargets, error) { func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (st *llb.State, img, baseImg *dockerspec.DockerOCIImage, sbom *SBOMTargets, err error) {
ds, err := toDispatchState(ctx, dt, opt) ds, err := toDispatchState(ctx, dt, opt)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, nil, err
} }
sbom := SBOMTargets{ sbom = &SBOMTargets{
Core: ds.state, Core: ds.state,
Extras: map[string]llb.State{}, Extras: map[string]llb.State{},
} }
@ -95,7 +97,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
} }
} }
return &ds.state, &ds.image, &sbom, nil return &ds.state, &ds.image, ds.baseImg, sbom, nil
} }
func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) { func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) {
@ -140,7 +142,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
return nil, errors.Errorf("the Dockerfile cannot be empty") return nil, errors.Errorf("the Dockerfile cannot be empty")
} }
namedContext := func(ctx context.Context, name string, copt dockerui.ContextOpt) (*llb.State, *image.Image, error) { if opt.Client != nil && opt.MainContext != nil {
return nil, errors.Errorf("Client and MainContext cannot both be provided")
}
namedContext := func(ctx context.Context, name string, copt dockerui.ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) {
if opt.Client == nil { if opt.Client == nil {
return nil, nil, nil return nil, nil, nil
} }
@ -148,11 +154,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
if copt.Platform == nil { if copt.Platform == nil {
copt.Platform = opt.TargetPlatform copt.Platform = opt.TargetPlatform
} }
st, img, err := opt.Client.NamedContext(ctx, name, copt) return opt.Client.NamedContext(ctx, name, copt)
if err != nil {
return nil, nil, err
}
return st, img, nil
} }
return nil, nil, nil return nil, nil, nil
} }
@ -230,8 +232,9 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
ds := &dispatchState{ ds := &dispatchState{
stage: st, stage: st,
deps: make(map[*dispatchState]struct{}), deps: make(map[*dispatchState]instructions.Command),
ctxPaths: make(map[string]struct{}), ctxPaths: make(map[string]struct{}),
paths: make(map[string]struct{}),
stageName: st.Name, stageName: st.Name,
prefixPlatform: opt.MultiPlatformRequested, prefixPlatform: opt.MultiPlatformRequested,
outline: outline.clone(), outline: outline.clone(),
@ -255,7 +258,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
} }
if st.Name != "" { if st.Name != "" {
s, img, err := namedContext(ctx, st.Name, dockerui.ContextOpt{Platform: ds.platform, ResolveMode: opt.ImageResolveMode.String()}) s, img, err := namedContext(ctx, st.Name, dockerui.ContextOpt{
Platform: ds.platform,
ResolveMode: opt.ImageResolveMode.String(),
AsyncLocalOpts: ds.asyncLocalOpts,
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -263,12 +270,18 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
ds.noinit = true ds.noinit = true
ds.state = *s ds.state = *s
if img != nil { if img != nil {
ds.image = clampTimes(*img, opt.Epoch) // timestamps are inherited as-is, regardless to SOURCE_DATE_EPOCH
// https://github.com/moby/buildkit/issues/4614
ds.image = *img
if img.Architecture != "" && img.OS != "" { if img.Architecture != "" && img.OS != "" {
ds.platform = &ocispecs.Platform{ ds.platform = &ocispecs.Platform{
OS: img.OS, OS: img.OS,
Architecture: img.Architecture, Architecture: img.Architecture,
Variant: img.Variant, Variant: img.Variant,
OSVersion: img.OSVersion,
}
if img.OSFeatures != nil {
ds.platform.OSFeatures = append([]string{}, img.OSFeatures...)
} }
} }
} }
@ -312,7 +325,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
var ok bool var ok bool
target, ok = allDispatchStates.findStateByName(opt.Target) target, ok = allDispatchStates.findStateByName(opt.Target)
if !ok { if !ok {
return nil, errors.Errorf("target stage %s could not be found", opt.Target) return nil, errors.Errorf("target stage %q could not be found", opt.Target)
} }
} }
@ -327,7 +340,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
d.commands[i] = newCmd d.commands[i] = newCmd
for _, src := range newCmd.sources { for _, src := range newCmd.sources {
if src != nil { if src != nil {
d.deps[src] = struct{}{} d.deps[src] = cmd
if src.unregistered { if src.unregistered {
allDispatchStates.addState(src) allDispatchStates.addState(src)
} }
@ -336,8 +349,8 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
} }
} }
if has, state := hasCircularDependency(allDispatchStates.states); has { if err := validateCircularDependency(allDispatchStates.states); err != nil {
return nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) return nil, err
} }
if len(allDispatchStates.states) == 1 { if len(allDispatchStates.states) == 1 {
@ -361,6 +374,9 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
d.state = llb.Scratch() d.state = llb.Scratch()
d.image = emptyImage(platformOpt.targetPlatform) d.image = emptyImage(platformOpt.targetPlatform)
d.platform = &platformOpt.targetPlatform d.platform = &platformOpt.targetPlatform
if d.unregistered {
d.noinit = true
}
continue continue
} }
func(i int, d *dispatchState) { func(i int, d *dispatchState) {
@ -369,6 +385,10 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
if err != nil { if err != nil {
err = parser.WithLocation(err, d.stage.Location) err = parser.WithLocation(err, d.stage.Location)
} }
if d.unregistered {
// implicit stages don't need further dispatch
d.noinit = true
}
}() }()
origName := d.stage.BaseName origName := d.stage.BaseName
ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) ref, err := reference.ParseNormalizedNamed(d.stage.BaseName)
@ -382,7 +402,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
d.stage.BaseName = reference.TagNameOnly(ref).String() d.stage.BaseName = reference.TagNameOnly(ref).String()
var isScratch bool var isScratch bool
st, img, err := namedContext(ctx, d.stage.BaseName, dockerui.ContextOpt{ResolveMode: opt.ImageResolveMode.String(), Platform: platform}) st, img, err := namedContext(ctx, d.stage.BaseName, dockerui.ContextOpt{
ResolveMode: opt.ImageResolveMode.String(),
Platform: platform,
AsyncLocalOpts: d.asyncLocalOpts,
})
if err != nil { if err != nil {
return err return err
} }
@ -402,12 +426,12 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
prefix += platforms.Format(*platform) + " " prefix += platforms.Format(*platform) + " "
} }
prefix += "internal]" prefix += "internal]"
mutRef, dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ mutRef, dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, sourceresolver.Opt{
Platform: platform, LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
ResolveMode: opt.ImageResolveMode.String(), Platform: platform,
LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), ImageOpt: &sourceresolver.ResolveImageOpt{
ResolverType: llb.ResolverTypeRegistry, ResolveMode: opt.ImageResolveMode.String(),
SourcePolicies: nil, },
}) })
if err != nil { if err != nil {
return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true)
@ -419,10 +443,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
return errors.Wrapf(err, "failed to parse ref %q", mutRef) return errors.Wrapf(err, "failed to parse ref %q", mutRef)
} }
} }
var img image.Image var img dockerspec.DockerOCIImage
if err := json.Unmarshal(dt, &img); err != nil { if err := json.Unmarshal(dt, &img); err != nil {
return errors.Wrap(err, "failed to parse image config") return errors.Wrap(err, "failed to parse image config")
} }
d.baseImg = cloneX(&img) // immutable
img.Created = nil img.Created = nil
// if there is no explicit target platform, try to match based on image config // if there is no explicit target platform, try to match based on image config
if d.platform == nil && platformOpt.implicitTarget { if d.platform == nil && platformOpt.implicitTarget {
@ -478,16 +503,31 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
if !isReachable(target, d) || d.noinit { if !isReachable(target, d) || d.noinit {
continue continue
} }
// mark as initialized, used to determine states that have not been dispatched yet
d.noinit = true
if d.base != nil { if d.base != nil {
d.state = d.base.state d.state = d.base.state
d.platform = d.base.platform d.platform = d.base.platform
d.image = clone(d.base.image) d.image = clone(d.base.image)
d.baseImg = cloneX(d.base.baseImg)
// Utilize the same path index as our base image so we propagate
// the paths we use back to the base image.
d.paths = d.base.paths
}
// Ensure platform is set.
if d.platform == nil {
d.platform = &d.opt.targetPlatform
} }
// make sure that PATH is always set // make sure that PATH is always set
if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok {
d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(d.platform.OS)) var osName string
if d.platform != nil {
osName = d.platform.OS
}
d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(osName))
} }
// initialize base metadata from image conf // initialize base metadata from image conf
@ -556,6 +596,11 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
} }
} }
// Ensure the entirety of the target state is marked as used.
// This is done after we've already evaluated every stage to ensure
// the paths attribute is set correctly.
target.paths["/"] = struct{}{}
if len(opt.Labels) != 0 && target.image.Config.Labels == nil { if len(opt.Labels) != 0 && target.image.Config.Labels == nil {
target.image.Config.Labels = make(map[string]string, len(opt.Labels)) target.image.Config.Labels = make(map[string]string, len(opt.Labels))
} }
@ -563,17 +608,17 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
target.image.Config.Labels[k] = v target.image.Config.Labels[k] = v
} }
opts := []llb.LocalOption{} opts := filterPaths(ctxPaths)
if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { bctx := opt.MainContext
opts = append(opts, llb.FollowPaths(includePatterns))
}
if opt.Client != nil { if opt.Client != nil {
bctx, err := opt.Client.MainContext(ctx, opts...) bctx, err = opt.Client.MainContext(ctx, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
buildContext.Output = bctx.Output() } else if bctx == nil {
bctx = dockerui.DefaultMainContext(opts...)
} }
buildContext.Output = bctx.Output()
defaults := []llb.ConstraintsOpt{ defaults := []llb.ConstraintsOpt{
llb.Platform(platformOpt.targetPlatform), llb.Platform(platformOpt.targetPlatform),
@ -587,6 +632,10 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
target.image.OS = platformOpt.targetPlatform.OS target.image.OS = platformOpt.targetPlatform.OS
target.image.Architecture = platformOpt.targetPlatform.Architecture target.image.Architecture = platformOpt.targetPlatform.Architecture
target.image.Variant = platformOpt.targetPlatform.Variant target.image.Variant = platformOpt.targetPlatform.Variant
target.image.OSVersion = platformOpt.targetPlatform.OSVersion
if platformOpt.targetPlatform.OSFeatures != nil {
target.image.OSFeatures = append([]string{}, platformOpt.targetPlatform.OSFeatures...)
}
} }
return target, nil return target, nil
@ -613,7 +662,8 @@ func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (comm
if !ok { if !ok {
stn = &dispatchState{ stn = &dispatchState{
stage: instructions.Stage{BaseName: c.From, Location: ic.Location()}, stage: instructions.Stage{BaseName: c.From, Location: ic.Location()},
deps: make(map[*dispatchState]struct{}), deps: make(map[*dispatchState]instructions.Command),
paths: make(map[string]struct{}),
unregistered: true, unregistered: true,
} }
} }
@ -698,17 +748,18 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
} }
if err == nil { if err == nil {
err = dispatchCopy(d, copyConfig{ err = dispatchCopy(d, copyConfig{
params: c.SourcesAndDest, params: c.SourcesAndDest,
source: opt.buildContext, excludePatterns: c.ExcludePatterns,
isAddCommand: true, source: opt.buildContext,
cmdToPrint: c, isAddCommand: true,
chown: c.Chown, cmdToPrint: c,
chmod: c.Chmod, chown: c.Chown,
link: c.Link, chmod: c.Chmod,
keepGitDir: c.KeepGitDir, link: c.Link,
checksum: checksum, keepGitDir: c.KeepGitDir,
location: c.Location(), checksum: checksum,
opt: opt, location: c.Location(),
opt: opt,
}) })
} }
if err == nil { if err == nil {
@ -743,22 +794,38 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
case *instructions.CopyCommand: case *instructions.CopyCommand:
l := opt.buildContext l := opt.buildContext
if len(cmd.sources) != 0 { if len(cmd.sources) != 0 {
l = cmd.sources[0].state src := cmd.sources[0]
if !src.noinit {
return errors.Errorf("cannot copy from stage %q, it needs to be defined before current stage %q", c.From, d.stageName)
}
l = src.state
} }
err = dispatchCopy(d, copyConfig{ err = dispatchCopy(d, copyConfig{
params: c.SourcesAndDest, params: c.SourcesAndDest,
source: l, excludePatterns: c.ExcludePatterns,
isAddCommand: false, source: l,
cmdToPrint: c, isAddCommand: false,
chown: c.Chown, cmdToPrint: c,
chmod: c.Chmod, chown: c.Chown,
link: c.Link, chmod: c.Chmod,
location: c.Location(), link: c.Link,
opt: opt, parents: c.Parents,
location: c.Location(),
opt: opt,
}) })
if err == nil && len(cmd.sources) == 0 { if err == nil {
for _, src := range c.SourcePaths { if len(cmd.sources) == 0 {
d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} for _, src := range c.SourcePaths {
d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
}
} else {
source := cmd.sources[0]
if source.paths == nil {
source.paths = make(map[string]struct{})
}
for _, src := range c.SourcePaths {
source.paths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
}
} }
} }
default: default:
@ -767,17 +834,21 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
} }
type dispatchState struct { type dispatchState struct {
opt dispatchOpt opt dispatchOpt
state llb.State state llb.State
image image.Image image dockerspec.DockerOCIImage
platform *ocispecs.Platform platform *ocispecs.Platform
stage instructions.Stage stage instructions.Stage
base *dispatchState base *dispatchState
noinit bool baseImg *dockerspec.DockerOCIImage // immutable, unlike image
deps map[*dispatchState]struct{} noinit bool
buildArgs []instructions.KeyValuePairOptional deps map[*dispatchState]instructions.Command
commands []command buildArgs []instructions.KeyValuePairOptional
ctxPaths map[string]struct{} commands []command
// ctxPaths marks the paths this dispatchState uses from the build context.
ctxPaths map[string]struct{}
// paths marks the paths that are used by this dispatchState.
paths map[string]struct{}
ignoreCache bool ignoreCache bool
cmdSet bool cmdSet bool
unregistered bool unregistered bool
@ -791,6 +862,10 @@ type dispatchState struct {
scanContext bool scanContext bool
} }
func (ds *dispatchState) asyncLocalOpts() []llb.LocalOption {
return filterPaths(ds.paths)
}
type dispatchStates struct { type dispatchStates struct {
states []*dispatchState states []*dispatchState
statesByName map[string]*dispatchState statesByName map[string]*dispatchState
@ -873,6 +948,9 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
customname := c.String() customname := c.String()
// Run command can potentially access any file. Mark the full filesystem as used.
d.paths["/"] = struct{}{}
var args []string = c.CmdLine var args []string = c.CmdLine
if len(c.Files) > 0 { if len(c.Files) > 0 {
if len(args) != 1 || !c.PrependShell { if len(args) != 1 || !c.PrependShell {
@ -1057,6 +1135,13 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
copyOpt = append(copyOpt, llb.WithUser(cfg.chown)) copyOpt = append(copyOpt, llb.WithUser(cfg.chown))
} }
if len(cfg.excludePatterns) > 0 {
// in theory we don't need to check whether there are any exclude patterns,
// as an empty list is a no-op. However, performing the check makes
// the code easier to understand and costs virtually nothing.
copyOpt = append(copyOpt, llb.WithExcludePatterns(cfg.excludePatterns))
}
var mode *os.FileMode var mode *os.FileMode
if cfg.chmod != "" { if cfg.chmod != "" {
p, err := strconv.ParseUint(cfg.chmod, 8, 32) p, err := strconv.ParseUint(cfg.chmod, 8, 32)
@ -1085,6 +1170,29 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
commitMessage.WriteString("COPY") commitMessage.WriteString("COPY")
} }
if cfg.parents {
commitMessage.WriteString(" " + "--parents")
}
if cfg.chown != "" {
commitMessage.WriteString(" " + "--chown=" + cfg.chown)
}
if cfg.chmod != "" {
commitMessage.WriteString(" " + "--chmod=" + cfg.chmod)
}
platform := cfg.opt.targetPlatform
if d.platform != nil {
platform = *d.platform
}
env, err := d.state.Env(context.TODO())
if err != nil {
return err
}
name := uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env))
pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env)
var a *llb.FileAction var a *llb.FileAction
for _, src := range cfg.params.SourcePaths { for _, src := range cfg.params.SourcePaths {
@ -1099,7 +1207,7 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
if gitRef.SubDir != "" { if gitRef.SubDir != "" {
commit += ":" + gitRef.SubDir commit += ":" + gitRef.SubDir
} }
var gitOptions []llb.GitOption gitOptions := []llb.GitOption{llb.WithCustomName(pgName)}
if cfg.keepGitDir { if cfg.keepGitDir {
gitOptions = append(gitOptions, llb.KeepGitDir()) gitOptions = append(gitOptions, llb.KeepGitDir())
} }
@ -1131,7 +1239,7 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
} }
} }
st := llb.HTTP(src, llb.Filename(f), llb.Checksum(cfg.checksum), dfCmd(cfg.params)) st := llb.HTTP(src, llb.Filename(f), llb.WithCustomName(pgName), llb.Checksum(cfg.checksum), dfCmd(cfg.params))
opts := append([]llb.CopyOption{&llb.CopyInfo{ opts := append([]llb.CopyOption{&llb.CopyInfo{
Mode: mode, Mode: mode,
@ -1149,10 +1257,18 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
return errors.Wrap(err, "removing drive letter") return errors.Wrap(err, "removing drive letter")
} }
var patterns []string
if cfg.parents {
path := strings.TrimPrefix(src, "/")
patterns = []string{path}
src = "/"
}
opts := append([]llb.CopyOption{&llb.CopyInfo{ opts := append([]llb.CopyOption{&llb.CopyInfo{
Mode: mode, Mode: mode,
FollowSymlinks: true, FollowSymlinks: true,
CopyDirContentsOnly: true, CopyDirContentsOnly: true,
IncludePatterns: patterns,
AttemptUnpack: cfg.isAddCommand, AttemptUnpack: cfg.isAddCommand,
CreateDestPath: true, CreateDestPath: true,
AllowWildcard: true, AllowWildcard: true,
@ -1195,19 +1311,8 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
commitMessage.WriteString(" " + cfg.params.DestPath) commitMessage.WriteString(" " + cfg.params.DestPath)
platform := cfg.opt.targetPlatform
if d.platform != nil {
platform = *d.platform
}
env, err := d.state.Env(context.TODO())
if err != nil {
return err
}
name := uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env))
fileOpt := []llb.ConstraintsOpt{ fileOpt := []llb.ConstraintsOpt{
llb.WithCustomName(prefixCommand(d, name, d.prefixPlatform, &platform, env)), llb.WithCustomName(pgName),
location(cfg.opt.sourceMap, cfg.location), location(cfg.opt.sourceMap, cfg.location),
} }
if d.ignoreCache { if d.ignoreCache {
@ -1240,17 +1345,19 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
} }
type copyConfig struct { type copyConfig struct {
params instructions.SourcesAndDest params instructions.SourcesAndDest
source llb.State excludePatterns []string
isAddCommand bool source llb.State
cmdToPrint fmt.Stringer isAddCommand bool
chown string cmdToPrint fmt.Stringer
chmod string chown string
link bool chmod string
keepGitDir bool link bool
checksum digest.Digest keepGitDir bool
location []parser.Range checksum digest.Digest
opt dispatchOpt parents bool
location []parser.Range
opt dispatchOpt
} }
func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error {
@ -1299,7 +1406,7 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err
} }
func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error {
d.image.Config.Healthcheck = &image.HealthConfig{ d.image.Config.Healthcheck = &dockerspec.HealthcheckConfig{
Test: c.Health.Test, Test: c.Health.Test,
Interval: c.Health.Interval, Interval: c.Health.Interval,
Timeout: c.Health.Timeout, Timeout: c.Health.Timeout,
@ -1498,7 +1605,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption
return strings.Join(append(tmpBuildEnv, args...), " ") return strings.Join(append(tmpBuildEnv, args...), " ")
} }
func commitToHistory(img *image.Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { func commitToHistory(img *dockerspec.DockerOCIImage, msg string, withLayer bool, st *llb.State, tm *time.Time) error {
if st != nil { if st != nil {
msg += " # buildkit" msg += " # buildkit"
} }
@ -1541,42 +1648,51 @@ func findReachable(from *dispatchState) (ret []*dispatchState) {
return ret return ret
} }
func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { func validateCircularDependency(states []*dispatchState) error {
var visit func(state *dispatchState) bool var visit func(*dispatchState, []instructions.Command) []instructions.Command
if states == nil { if states == nil {
return false, nil return nil
} }
visited := make(map[*dispatchState]struct{}) visited := make(map[*dispatchState]struct{})
path := make(map[*dispatchState]struct{}) path := make(map[*dispatchState]struct{})
visit = func(state *dispatchState) bool { visit = func(state *dispatchState, current []instructions.Command) []instructions.Command {
_, ok := visited[state] _, ok := visited[state]
if ok { if ok {
return false return nil
} }
visited[state] = struct{}{} visited[state] = struct{}{}
path[state] = struct{}{} path[state] = struct{}{}
for dep := range state.deps { for dep, c := range state.deps {
_, ok = path[dep] next := append(current, c)
if ok { if _, ok := path[dep]; ok {
return true return next
} }
if visit(dep) { if c := visit(dep, next); c != nil {
return true return c
} }
} }
delete(path, state) delete(path, state)
return false return nil
} }
for _, state := range states { for _, state := range states {
if visit(state) { if cmds := visit(state, nil); cmds != nil {
return true, state err := errors.Errorf("circular dependency detected on stage: %s", state.stageName)
for _, c := range cmds {
err = parser.WithLocation(err, c.Location())
}
return err
} }
} }
return false, nil return nil
} }
func normalizeContextPaths(paths map[string]struct{}) []string { func normalizeContextPaths(paths map[string]struct{}) []string {
// Avoid a useless allocation if the set of paths is empty.
if len(paths) == 0 {
return nil
}
pathSlice := make([]string, 0, len(paths)) pathSlice := make([]string, 0, len(paths))
for p := range paths { for p := range paths {
if p == "/" { if p == "/" {
@ -1591,6 +1707,15 @@ func normalizeContextPaths(paths map[string]struct{}) []string {
return pathSlice return pathSlice
} }
// filterPaths returns the local options required to filter an llb.Local
// to only the required paths.
func filterPaths(paths map[string]struct{}) []llb.LocalOption {
if includePaths := normalizeContextPaths(paths); len(includePaths) > 0 {
return []llb.LocalOption{llb.FollowPaths(includePaths)}
}
return nil
}
func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {
pe := &llb.ProxyEnv{} pe := &llb.ProxyEnv{}
isNil := true isNil := true
@ -1626,7 +1751,7 @@ type mutableOutput struct {
llb.Output llb.Output
} }
func withShell(img image.Image, args []string) []string { func withShell(img dockerspec.DockerOCIImage, args []string) []string {
var shell []string var shell []string
if len(img.Config.Shell) > 0 { if len(img.Config.Shell) > 0 {
shell = append([]string{}, img.Config.Shell...) shell = append([]string{}, img.Config.Shell...)
@ -1636,7 +1761,7 @@ func withShell(img image.Image, args []string) []string {
return append(shell, strings.Join(args, " ")) return append(shell, strings.Join(args, " "))
} }
func autoDetectPlatform(img image.Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { func autoDetectPlatform(img dockerspec.DockerOCIImage, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform {
os := img.OS os := img.OS
arch := img.Architecture arch := img.Architecture
if target.OS == os && target.Architecture == arch { if target.OS == os && target.Architecture == arch {
@ -1774,21 +1899,6 @@ func commonImageNames() []string {
return out return out
} }
func clampTimes(img image.Image, tm *time.Time) image.Image {
if tm == nil {
return img
}
for i, h := range img.History {
if h.Created == nil || h.Created.After(*tm) {
img.History[i].Created = tm
}
}
if img.Created != nil && img.Created.After(*tm) {
img.Created = tm
}
return img
}
func isHTTPSource(src string) bool { func isHTTPSource(src string) bool {
return strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") return strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://")
} }

Some files were not shown because too many files have changed in this diff Show more